Column               Type            Range
repo_name            stringlengths   7–71
file_path            stringlengths   5–118
context              list
import_statement     stringlengths   45–12.5k
token_num            int64           641–99.4k
cropped_code         stringlengths   44–17k
all_code             stringlengths   43–754k
next_line            stringlengths   2–330
gold_snippet_index   int64           0–68
created_at           stringlengths   25–25
level                stringclasses   9 values
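Each record carries one code-completion example described by the columns above. Below is a minimal sketch of loading and inspecting such records, assuming this dump corresponds to a Hugging Face `datasets`-style dataset; the identifier "your-org/your-dataset" is a placeholder and is not taken from this document.

# A minimal sketch, assuming the records follow the column schema listed above.
# "your-org/your-dataset" is a hypothetical placeholder path.
from datasets import load_dataset

ds = load_dataset("your-org/your-dataset", split="train")  # hypothetical identifier

record = ds[0]
print(record["repo_name"], record["file_path"], record["level"])
print("context snippets:", len(record["context"]))
print("gold snippet index:", record["gold_snippet_index"])
print("target next line:", record["next_line"])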
gardenifi/server
app/raspi/mqtt.py
[ { "identifier": "Services", "path": "app/raspi/services.py", "snippet": "class Services:\n \"\"\"\n The `Services` class provides various methods for managing and controlling\n services related to a Raspberry Pi device, such as turning on/off valves,\n storing and deleting program cycles, loading program cycles, discovering\n WiFi networks, and saving WiFi network configurations.\n \"\"\"\n\n def __init__(self):\n \"\"\"Constructor\"\"\"\n self._scheduler = BackgroundScheduler()\n self._scheduler_started = False\n\n @property\n def scheduler_started(self):\n \"\"\"getter\"\"\"\n return self._scheduler_started\n\n @scheduler_started.setter\n def scheduler_started(self, value):\n \"\"\"setter\"\"\"\n self._scheduler_started = value\n\n @property\n def scheduler(self):\n \"\"\"getter\"\"\"\n return self._scheduler\n\n @scheduler.setter\n def scheduler(self, value):\n \"\"\"setter\"\"\"\n self._scheduler = value\n\n def turn_on_from_program(self, valve):\n \"\"\"\n Turn on a valve based on the program.\n\n Parameters:\n - valve (int): The valve number.\n\n Returns:\n None\n \"\"\"\n return Helpers().toggle(2, \"out\" + str(valve))\n\n def turn_off_from_program(self, valve):\n \"\"\"\n Turn off a valve based on the program.\n\n Parameters:\n - valve (int): The valve number.\n\n Returns:\n None\n \"\"\"\n return Helpers().toggle(0, \"out\" + str(valve))\n\n def convert_to_utc(self, start_hour, tz_offset):\n \"\"\"\n Converts a given start hour in a specific time zone to Coordinated Universal Time (UTC).\n\n Args:\n start_hour (int): The starting hour in the local time zone.\n tz_offset (int): The time zone offset in hours. Positive values for time zones ahead of UTC,\n negative values for time zones behind UTC.\n\n Returns:\n Tuple[int, int]: A tuple containing the adjusted hour in UTC and the number of days passed.\n The adjusted hour is in the range [0, 23], and the days_passed is -1, 0, or 1\n indicating whether the adjusted hour falls before, within, or after the current day.\n\n Example:\n For a local start_hour of 10 and tz_offset of -5 (Eastern Standard Time),\n convert_to_utc(10, -5) may return (5, 0), indicating that the adjusted UTC hour is 5 with no days passed.\n\n Note:\n The method assumes a 24-hour clock format.\n \"\"\"\n logger.info(f\"Checking whether start_hour should change: {start_hour}, tz_offset: {tz_offset}\")\n # Calculate the adjusted hour\n adjusted_hour = start_hour - tz_offset\n if adjusted_hour <= 0:\n days_passed = -1\n elif adjusted_hour >= 24:\n days_passed = 1\n else:\n days_passed = 0\n adjusted_hour = adjusted_hour % 24\n return adjusted_hour, days_passed\n\n def get_previous_day(self, current_day):\n \"\"\"\n Returns the name of the previous day based on the given current day.\n\n Parameters:\n - current_day (str): The name of the current day (e.g., 'mon').\n\n Returns:\n str: The name of the previous day.\n \"\"\"\n # Find the index of the current day\n current_index = DAYS.index(current_day)\n # Calculate the index of the previous day\n previous_index = (current_index - 1) % len(DAYS)\n # Get the name of the previous day\n previous_day = DAYS[previous_index]\n return previous_day\n\n def get_next_day(self, current_day):\n \"\"\"\n Returns the name of the next day based on the given current day.\n\n Parameters:\n - current_day (str): The name of the current day (e.g., 'mon').\n\n Returns:\n str: The name of the next day.\n \"\"\"\n # Find the index of the current day\n current_index = DAYS.index(current_day)\n # Calculate the index of the next day\n 
next_index = (current_index + 1) % len(DAYS)\n # Get the name of the next day\n next_day = DAYS[next_index]\n return next_day\n\n def get_start_day_hour(self, day, start_hour, tz_offset):\n \"\"\"\n Checks if the start day or hour should be adjusted based on the provided conditions.\n\n Parameters:\n - day (str): The name of the current day (e.g., 'Monday').\n - start_hour (int): The original start hour (0 to 23).\n - tz_offset (int): The timezone offset in hours (-12 to +14).\n\n Returns:\n tuple: A tuple containing the adjusted day and start hour based on the provided conditions.\n \"\"\"\n logger.info(f\"Checking whether start_day should change: {day}\")\n # Convert start_hour to UTC (e.g. start_hour=0, tz_offset=2, start_hour=22)\n start_hour, days_passed = self.convert_to_utc(start_hour, tz_offset)\n if days_passed == 1:\n day = self.get_next_day(day)\n elif days_passed == -1:\n day = self.get_previous_day(day)\n logger.info(f\"new start_day: {day}\")\n logger.info(f\"new start_hour: {start_hour}\")\n return day, start_hour\n\n def get_stop_datetime(self, day, start_hour, start_min, period):\n \"\"\"\n Calculate the stop time for a program cycle.\n\n Parameters:\n - day (str): The day of the week.\n - start_hour (int): The starting hour.\n - start_min (int): The starting minute.\n - period (int): The duration of the cycle in minutes.\n\n Returns:\n tuple: A tuple containing the stop day, stop hour, and stop minute.\n \"\"\"\n logger.debug(f\"Converting to correct day, start, stop: {day}, {start_hour}, {start_min}, {period}\")\n stop_day_index = DAYS.index(day)\n logger.debug(f\"stop_day_index {stop_day_index}\")\n\n stop_min = (start_min + period) % 60\n logger.debug(f\"stop_min {stop_min}\")\n\n if stop_min < start_min:\n # should go to the next hour\n stop_hour = (start_hour + 1) % 24\n # should go to the next day\n if stop_hour < start_hour:\n stop_day_index = (stop_day_index + 1) % 7\n else:\n stop_hour = start_hour\n\n logger.debug(f\"stop_hour {stop_hour}\")\n\n stop_day = DAYS[stop_day_index]\n logger.debug(f\"stop_day: {stop_day}\")\n\n return stop_day, stop_hour, stop_min\n\n def store_program_cycles(self, json_data, store=False) -> None:\n \"\"\"\n Store program cycles and schedule them using the scheduler.\n\n Parameters:\n - json_data (dict): JSON data containing program information.\n - store (bool, optional): Whether to store the program information. Default is False.\n\n Returns:\n None\n \"\"\"\n try:\n triggers_to_start = []\n triggers_to_stop = []\n for day in json_data[\"days\"].split(\",\"):\n if day not in DAYS:\n raise DayValueException(f\"{day} is not correct! 
Accepted values: {DAYS}\")\n tz_offset = json_data[\"tz_offset\"]\n if not isinstance(tz_offset, int):\n raise TypeError(\"The variable tz_offset is not an integer: {tz_offset}\")\n\n # keeping day sent by user to use on every iteration of cycles\n user_day = day\n for cycle in json_data[\"cycles\"]:\n logger.info(f\"Cycle: {cycle}\")\n if int(cycle[\"min\"]) <= 0:\n logger.info(\"This cycle should not be considered to be in the program due to min <=0.\")\n continue\n start_hour = cycle[\"start\"].split(\":\")[0]\n start_min = cycle[\"start\"].split(\":\")[1]\n\n day, start_hour = self.get_start_day_hour(user_day, int(start_hour), tz_offset)\n\n logger.info(f\"Start: {day} at {start_hour}:{start_min}\")\n triggers_to_start.append(CronTrigger(day_of_week=day, hour=int(start_hour), minute=int(start_min)))\n\n stop_day, stop_hour, stop_min = self.get_stop_datetime(day, int(start_hour), int(start_min), int(cycle[\"min\"]))\n logger.info(f\"Stop: {stop_day} at {stop_hour}:{stop_min}\")\n triggers_to_stop.append(CronTrigger(day_of_week=stop_day, hour=stop_hour, minute=stop_min))\n\n logger.info(f\"FINAL Triggers To Start to be in the program:{triggers_to_start}\")\n logger.info(f\"FINAL Triggers To Stop to be in the program: {triggers_to_stop}\")\n\n self._scheduler.add_job(self.turn_on_from_program, OrTrigger(triggers_to_start), args=[json_data[\"out\"]])\n self._scheduler.add_job(self.turn_off_from_program, OrTrigger(triggers_to_stop), args=[json_data[\"out\"]])\n\n if not self._scheduler_started:\n self._scheduler.start()\n self._scheduler_started = True\n\n if store is True:\n file_path = PROGRAM + str(json_data[\"out\"]) + PROGRAM_EXT\n with open(file_path, \"w\", encoding=\"utf-8\") as outfile:\n json.dump(json_data, outfile)\n outfile.close()\n\n except KeyError as kex:\n raise KeyError(f\"The {kex} field is missing in the JSON data.\") from kex\n except Exception as exception:\n logger.error(f\"Error: {exception}\")\n raise\n\n def delete_program(self, valve) -> bool:\n \"\"\"\n Delete a stored program for a specific valve.\n\n Parameters:\n - valve (int): The valve number.\n\n Returns:\n bool: True if the program was deleted, False otherwise.\n \"\"\"\n file_path = PROGRAM + str(valve) + PROGRAM_EXT\n logger.info(f\"Looking for {file_path} to delete!\")\n if path.exists(file_path):\n logger.info(f\"{file_path} exists! 
Deleting it...\")\n remove(file_path)\n return True\n return False\n\n def load_program_cycles_if_exists(self, valve):\n \"\"\"\n Load program cycles for a valve if a stored program exists.\n\n Parameters:\n - valve (int): The valve number.\n\n Returns:\n dict or None: The loaded JSON data or None if no program exists.\n \"\"\"\n file_path = PROGRAM + str(valve) + PROGRAM_EXT\n logger.info(f\"Loading {file_path} if exists!\")\n json_data = None\n if path.exists(file_path):\n logger.info(f\"{file_path} exists!\")\n with open(file_path, encoding=\"utf-8\") as json_file:\n json_data = json.load(json_file)\n self.store_program_cycles(json_data)\n json_file.close()\n if not self._scheduler_started:\n self._scheduler.start()\n self._scheduler_started = True\n return json_data\n\n def split_json_into_chunks(self, selected_page, ap_array):\n \"\"\"\n Split a JSON array into chunks and create a response JSON.\n\n Parameters:\n - selected_page (int): The requested page number.\n - ap_array (list): The array to be split.\n\n Returns:\n dict: The response JSON containing the specified page and network information.\n \"\"\"\n selected_page = int(selected_page)\n json_response = {\n \"hw_id\": RPI_HW_ID,\n \"mqtt_broker\": {\"host\": MQTT_HOST, \"port\": int(MQTT_PORT), \"user\": MQTT_USER, \"pass\": MQTT_PASS},\n \"page\": selected_page,\n \"nets\": {},\n \"pages\": 0,\n }\n json_response_to_send = json_response.copy()\n\n headers_size = len(json.dumps(json_response).encode(\"utf-8\"))\n logger.debug(f\"Initial JSON response headers size: {headers_size} bytes\")\n\n pages = 1\n current_chunk_size = headers_size\n json_array = []\n\n for item in ap_array:\n json_response[\"pages\"] = pages\n headers_size = len(json.dumps(json_response).encode(\"utf-8\"))\n item_size = len(json.dumps(item).encode(\"utf-8\"))\n logger.debug(\n \"JSON item size: \"\n + f\"{item_size} bytes, \"\n + \"current_chunk_size: \"\n + f\"{current_chunk_size} bytes, \"\n + \"total: \"\n + f\"{current_chunk_size + item_size} bytes\"\n )\n if current_chunk_size + item_size >= MAX_NUM_OF_BYTES_CHUNK - MAX_NUM_OF_BUFFER_TO_ADD:\n pages += 1\n json_response[\"pages\"] = pages\n json_array = [item]\n json_response[\"nets\"] = json_array\n headers_size = len(json.dumps(json_response).encode(\"utf-8\"))\n current_chunk_size = headers_size + item_size + len(\", \")\n logger.debug(\n f\"Found total >= {MAX_NUM_OF_BYTES_CHUNK}: \"\n f\"Creating a new page: {pages}. \"\n f\"Current chunk size: {current_chunk_size} bytes\"\n )\n else:\n json_array.append(item)\n current_chunk_size += item_size + len(\", \")\n if selected_page == pages:\n json_response_to_send[\"nets\"] = json_array\n\n json_response_to_send[\"pages\"] = pages\n logger.debug(f\"JSON response size: {headers_size}\")\n logger.debug(\n f\"Nets array for this page ({pages}): {json_array}. \"\n f\"Current nets array size: {len(json.dumps(json_array).encode('utf-8'))} bytes, \"\n f\"Current chunk size: {current_chunk_size} bytes\"\n )\n\n if not json_response[\"nets\"]:\n json_response_to_send[\"nets\"] = json_array\n\n logger.debug(f\"JSON total size: {len(json.dumps(json_response_to_send).encode('utf-8'))}\")\n return json_response_to_send\n\n def discover_wifi_networks(self, chunked=0, page=1, refresh_networks_file=False):\n \"\"\"\n Discover available WiFi networks and return the information.\n\n Parameters:\n - chunked (int, optional): Whether to split the response into chunks. Default is 0.\n - page (int, optional): The requested page number. 
Default is 1.\n - refresh_networks_file (bool, optional): Whether to refresh the networks file. Default is False.\n\n Returns:\n str or dict: The JSON response containing WiFi network information.\n \"\"\"\n try:\n if page > 1:\n refresh_networks_file = False\n json_response = {}\n ap_array = []\n retries = 0\n while retries < 30:\n retries = retries + 1\n ap_array = Helpers().scan_rpi_wifi_networks(refresh_networks_file)\n if len(ap_array) != 0:\n break\n\n json_response = json.dumps(\n {\n \"hw_id\": RPI_HW_ID,\n \"mqtt_broker\": {\"host\": MQTT_HOST, \"port\": int(MQTT_PORT), \"user\": MQTT_USER, \"pass\": MQTT_PASS},\n \"ap_array\": ap_array,\n }\n )\n\n logger.info(f\"json_response: {json_response}\")\n if chunked == 0:\n return json_response\n logger.info(f\"Split array into chunks of {MAX_NUM_OF_BYTES_CHUNK} bytes...\")\n json_response = self.split_json_into_chunks(page, ap_array)\n return json_response\n except Exception as exception:\n logger.error(f\"Error: {exception}\")\n raise\n\n def save_wifi_network(self, ssid, wifi_key):\n \"\"\"\n Save WiFi network information.\n\n Parameters:\n - request_data (dict): The request data containing WiFi network information.\n\n Returns:\n str: \"OK\" if successful, \"NOT_OK\" otherwise.\n \"\"\"\n try:\n if ARCH == \"arm\":\n if ssid and wifi_key:\n Helpers().store_wpa_ssid_key(ssid, wifi_key)\n return \"OK\"\n raise ValueError(\"Error: You need to provide ssid and wifi_keys in POST data\")\n raise TypeError(f\"{ARCH} architecture is not supported!!!\")\n except Exception as exception:\n logger.error(f\"Error: {exception}\")\n raise\n\n def save_wifi_network_with_wpa(self, wpa_enabled, wpa_key):\n \"\"\"\n Save WiFi network information with WPA settings.\n\n Parameters:\n - request_params (dict): The request parameters containing WPA settings.\n\n Returns:\n str: \"OK\" if successful, \"NOT_OK\" otherwise.\n \"\"\"\n try:\n if ARCH == \"arm\":\n logger.info(f\"wpa_enabled: {wpa_enabled}, wpa_key: {wpa_key}\")\n if str(wpa_enabled) == \"1\":\n Helpers().update_wpa_supplicant(1, wpa_key)\n else:\n Helpers().update_wpa_supplicant(0, wpa_key)\n\n thread = Thread(target=Helpers().sleep_and_reboot_for_wpa)\n thread.start()\n return \"OK\"\n raise TypeError(f\"{ARCH} architecture is not supported!!!\")\n except Exception as exception:\n logger.error(f\"Error: {exception}\")\n raise" }, { "identifier": "MQTT_CLIENT_ID", "path": "app/raspi/const.py", "snippet": "MQTT_CLIENT_ID = \"RaspirriV1-MQTT-Client\" + str(uuid.uuid4())" }, { "identifier": "MQTT_TOPIC_STATUS", "path": "app/raspi/const.py", "snippet": "MQTT_TOPIC_STATUS = MQTT_TOPIC_BASE + load_env_variable(\"MQTT_TOPIC_STATUS\", \"/status\")" }, { "identifier": "MQTT_TOPIC_METADATA", "path": "app/raspi/const.py", "snippet": "MQTT_TOPIC_METADATA = MQTT_TOPIC_BASE + load_env_variable(\"MQTT_TOPIC_METADATA\", \"/metadata\")" }, { "identifier": "MQTT_TOPIC_CONFIG", "path": "app/raspi/const.py", "snippet": "MQTT_TOPIC_CONFIG = MQTT_TOPIC_BASE + load_env_variable(\"MQTT_TOPIC_CONFIG\", \"/config\")" }, { "identifier": "MQTT_TOPIC_CMD", "path": "app/raspi/const.py", "snippet": "MQTT_TOPIC_CMD = MQTT_TOPIC_BASE + load_env_variable(\"MQTT_TOPIC_CMD\", \"/command\")" }, { "identifier": "MQTT_TOPIC_VALVES", "path": "app/raspi/const.py", "snippet": "MQTT_TOPIC_VALVES = MQTT_TOPIC_BASE + load_env_variable(\"MQTT_TOPIC_VALVES\", \"/valves\")" }, { "identifier": "MQTT_STATUS_ERR", "path": "app/raspi/const.py", "snippet": "MQTT_STATUS_ERR = '{\"sts\": 1, \"err\": '" }, { "identifier": "PROGRAM", "path": 
"app/raspi/const.py", "snippet": "PROGRAM = \"program_\"" }, { "identifier": "PROGRAM_EXT", "path": "app/raspi/const.py", "snippet": "PROGRAM_EXT = \".json\"" }, { "identifier": "MQTT_STATUS_OK", "path": "app/raspi/const.py", "snippet": "MQTT_STATUS_OK = '{\"sts\": 0, \"res\": '" }, { "identifier": "MQTT_OK", "path": "app/raspi/const.py", "snippet": "MQTT_OK = '\"OK\"'" }, { "identifier": "MQTT_END", "path": "app/raspi/const.py", "snippet": "MQTT_END = \"}\"" }, { "identifier": "MQTT_USER", "path": "app/raspi/const.py", "snippet": "MQTT_USER = load_env_variable(\"MQTT_USER\", \"user\")" }, { "identifier": "MQTT_PASS", "path": "app/raspi/const.py", "snippet": "MQTT_PASS = load_env_variable(\"MQTT_PASS\", \"pass\")" }, { "identifier": "MQTT_HOST", "path": "app/raspi/const.py", "snippet": "MQTT_HOST = load_env_variable(\"MQTT_HOST\", \"localhost\")" }, { "identifier": "MQTT_PORT", "path": "app/raspi/const.py", "snippet": "MQTT_PORT = load_env_variable(\"MQTT_PORT\", \"1883\")" }, { "identifier": "Helpers", "path": "app/raspi/helpers.py", "snippet": "class Helpers:\n \"\"\"\n The `Helpers` class provides various helper methods for performing tasks\n such as setting valves, getting system information, storing and loading\n objects to/from files, managing WiFi networks, and updating the `wpa_supplicant.conf` file.\n \"\"\"\n\n __instance = None\n __lock = threading.Lock()\n\n def __new__(cls):\n \"\"\"\n Create a new instance of the Helpers class using the singleton design pattern.\n\n Returns:\n An instance of the Helpers class.\n\n Example Usage:\n instance = Helpers()\n \"\"\"\n if cls.__instance is None:\n with cls.__lock:\n cls.__instance = super().__new__(cls) # pylint: disable=duplicate-code\n cls._toggle_statuses = {}\n cls._ap_array = []\n cls._is_connected_to_inet = False\n return cls.__instance\n\n @classmethod\n def destroy_instance(cls):\n \"\"\"\n Destroy the instance of the Helpers class.\n\n This method sets the instance of the Helpers class to None, effectively destroying the instance.\n\n Example Usage:\n ```python\n instance = Helpers() # Create an instance of the Helpers class\n Helpers.destroy_instance() # Destroy the instance\n print(instance) # Output: None\n ```\n\n Inputs:\n None\n\n Outputs:\n None\n \"\"\"\n cls.__instance = None\n cls._toggle_statuses = {}\n cls._ap_array = []\n cls._is_connected_to_inet = False\n\n @property\n def toggle_statuses(self):\n \"\"\"\n Getter method for the toggle_statuses property.\n\n Returns:\n dict: A dictionary containing toggle statuses.\n\n Example:\n Access toggle statuses using `instance.toggle_statuses`.\n \"\"\"\n return self._toggle_statuses\n\n @toggle_statuses.setter\n def toggle_statuses(self, value):\n \"\"\"\n Setter method for the toggle_statuses property.\n\n Args:\n value (dict): A dictionary containing toggle statuses to set.\n\n Example:\n Set toggle statuses using `instance.toggle_statuses = new_statuses`.\n \"\"\"\n self._toggle_statuses = value\n\n @property\n def ap_array(self):\n \"\"\"\n Getter method for the _ap_array property.\n\n Returns:\n An array of wifi networks\n\n Example:\n Access toggle statuses using `instance.ap_array`.\n \"\"\"\n return self._ap_array\n\n @ap_array.setter\n def ap_array(self, value):\n \"\"\"\n Setter method for the _ap_array property.\n\n Args:\n value (dict): An array containing the wifi networks to set.\n\n Example:\n Set toggle statuses using `instance.ap_array = new_ap_array`.\n \"\"\"\n self._ap_array = value\n\n def set_valves(self, valves):\n \"\"\"\n Set valve statuses in 
the toggle_statuses dictionary.\n\n Args:\n valves (str or dict): A string or dictionary representing valve statuses.\n\n Example:\n instance.set_valves('{\"valve1\": true, \"valve2\": false}')\n \"\"\"\n try:\n if isinstance(valves, str):\n valves = ast.literal_eval(valves)\n else:\n valves = ast.literal_eval(str(valves))\n self._toggle_statuses[\"valves\"] = valves\n except Exception as exception:\n logger.error(f\"Error in set_valves: {exception}\")\n raise\n\n def extract_local_ip(self):\n \"\"\"\n Extract the local IP address of the device.\n\n Returns:\n str: The local IP address.\n\n Example:\n local_ip = instance.extract_local_ip()\n \"\"\"\n tcp_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n try:\n tcp_sock.connect((\"8.8.8.8\", 1))\n ip_address = tcp_sock.getsockname()[0]\n except Exception:\n ip_address = \"127.0.0.1\"\n finally:\n tcp_sock.close()\n return ip_address\n\n def get_uptime(self):\n \"\"\"\n Get the system uptime.\n\n Returns:\n str: The system uptime.\n\n Example:\n uptime = instance.get_uptime()\n \"\"\"\n try:\n result = subprocess.run([\"uptime\", \"-p\"], stdout=subprocess.PIPE, text=True, check=True)\n return result.stdout.replace(\"\\n\", \"\")\n except Exception as e:\n logger.error(f\"Error retrieving uptime: {e}\")\n return str(e)\n\n def get_git_commit_id(self):\n \"\"\"\n Get the Git commit ID of the current project.\n\n Returns:\n str: The Git commit ID.\n\n Example:\n commit_id = instance.get_git_commit_id()\n \"\"\"\n # Open the file in read mode ('r')\n try:\n with open(RPI_SERVER_GIT_COMMIT, encoding=\"utf-8\") as file:\n # Read the entire content of the file\n content = file.read().replace(\"\\n\", \"\")\n logger.debug(f\"File content: {content}\")\n return content\n except FileNotFoundError as e:\n logger.error(f\"The file '{RPI_SERVER_GIT_COMMIT}' does not exist.\")\n return str(e)\n except Exception as e:\n traceback.print_exc()\n logger.error(f\"Error retrieving git log: {e}\")\n return str(e)\n\n def store_object_to_file(self, filename, local_object):\n \"\"\"\n Store a local object to a file using pickle.\n\n Args:\n filename (str): The name of the file to store the object.\n local_object (object): The object to be stored.\n\n Example:\n instance.store_object_to_file('data.pkl', data)\n \"\"\"\n try:\n with open(filename, \"wb\") as obj_file:\n pickle.dump(local_object, obj_file)\n logger.info(f\"Stored local object file: {filename}: {local_object}\")\n obj_file.close()\n return local_object\n except Exception as exception:\n logger.error(f\"Error: {exception}\")\n raise\n\n def store_toggle_statuses_to_file(self):\n \"\"\"\n Store toggle statuses to a file.\n\n Returns:\n dict: The toggle statuses being stored.\n\n Example:\n stored_statuses = instance.store_toggle_statuses_to_file()\n \"\"\"\n return self.store_object_to_file(STATUSES_FILE, self._toggle_statuses)\n\n def store_wifi_networks_to_file(self):\n \"\"\"\n Store WiFi networks to a file.\n\n Returns:\n list: The WiFi networks being stored.\n\n Example:\n stored_networks = instance.store_wifi_networks_to_file()\n \"\"\"\n return self.store_object_to_file(NETWORKS_FILE, self._ap_array)\n\n def load_object_from_file(self, filename):\n \"\"\"\n Load a local object from a file using pickle.\n\n Args:\n filename (str): The name of the file to load the object from.\n\n Returns:\n object: The loaded object.\n\n Example:\n loaded_object = instance.load_object_from_file('data.pkl')\n \"\"\"\n try:\n local_obj = {}\n with open(filename, \"rb\") as obj_file:\n local_obj = 
pickle.load(obj_file)\n logger.info(f\"Loaded local object file: {filename}: {local_obj}\")\n obj_file.close()\n return local_obj\n except Exception as exception:\n logger.error(f\"Error: {exception}\")\n self.store_object_to_file(filename, local_obj)\n return local_obj\n\n def load_toggle_statuses_from_file(self):\n \"\"\"\n Load toggle statuses from a file and update the instance's _toggle_statuses attribute.\n \"\"\"\n self._toggle_statuses = self.load_object_from_file(STATUSES_FILE)\n\n def load_wifi_networks_from_file(self):\n \"\"\"\n Load WiFi networks from a file and update the instance's _ap_array attribute.\n \"\"\"\n self._ap_array = self.load_object_from_file(NETWORKS_FILE)\n\n def get_timezone(self):\n \"\"\"\n Get the system timezone.\n\n Returns:\n str: The system timezone.\n\n Example:\n timezone = instance.get_timezone()\n \"\"\"\n return str(time.tzname[time.daylight])\n\n def check_empty_toggle(self, valve):\n \"\"\"\n Check if a toggle status is empty for a specific valve and set a default value if it is.\n\n Args:\n valve (str): The name of the valve.\n\n Example:\n instance.check_empty_toggle(\"out1\")\n \"\"\"\n if self._toggle_statuses.get(valve) is None:\n self._toggle_statuses[valve] = 0\n self._toggle_statuses[valve] = self.set_gpio_outputs(self._toggle_statuses[valve], valve)\n\n def get_toggle_statuses(self):\n \"\"\"\n Get and update toggle statuses, system information, and store them to a file.\n\n Returns:\n dict: The updated toggle statuses.\n\n Example:\n updated_statuses = instance.get_toggle_statuses()\n \"\"\"\n if \"valves\" not in self._toggle_statuses:\n self.set_valves([])\n\n self.check_empty_toggle(\"out1\")\n self.check_empty_toggle(\"out2\")\n self.check_empty_toggle(\"out3\")\n self.check_empty_toggle(\"out4\")\n\n self._toggle_statuses[\"server_time\"] = str(datetime.now().strftime(\"%Y/%m/%d %H:%M:%S\"))\n self._toggle_statuses[\"tz\"] = self.get_timezone()\n self._toggle_statuses[\"hw_id\"] = RPI_HW_ID\n\n logger.info(f\"Valves statuses:{self._toggle_statuses}\")\n self.store_toggle_statuses_to_file()\n\n return self._toggle_statuses\n\n def set_gpio_outputs(self, status, valve):\n \"\"\"\n Set GPIO outputs for a specified valve.\n\n Args:\n status (int): The status to be set (0 or 1).\n valve (str): The name of the valve.\n\n Returns:\n int: The modified status.\n\n Example:\n modified_status = instance.set_gpio_outputs(1, \"out1\")\n \"\"\"\n status = bool(status in (1, 2))\n logger.info(f\"Set Output of Valve: {valve}::{status}\")\n if ARCH == \"arm\":\n if valve == \"out2\":\n logger.info(f\"===========> Setting PIN 11 GPIO.output...{status}\")\n # RuntimeError: Please set pin numbering mode using GPIO.setmode(GPIO.BOARD) or GPIO.setmode(GPIO.BCM)\n GPIO.output(11, status)\n logger.info(f\"===========> PIN 11 Status GPIO.input: {GPIO.input(11)}\")\n return 1 if status is True else 0\n\n def toggle(self, status, valve):\n \"\"\"\n Toggle a valve, set GPIO outputs, update toggle statuses, and store them to a file.\n\n Args:\n status (int): The new status to be set (0 or 1).\n valve (str): The name of the valve.\n\n Returns:\n str: A confirmation message.\n\n Example:\n confirmation = instance.toggle(1, \"out1\")\n \"\"\"\n status = self.set_gpio_outputs(status, valve)\n self._toggle_statuses[valve] = status\n logger.info(f\"Modified valves statuses: {self._toggle_statuses}\")\n self.store_toggle_statuses_to_file()\n return \"OK\"\n\n @property\n def is_connected_to_inet(self):\n \"\"\"\n Get the current internet connection status.\n\n 
Returns:\n bool: True if connected, False otherwise.\n\n Example:\n connection_status = instance.is_connected_to_inet()\n \"\"\"\n return self._is_connected_to_inet\n\n @is_connected_to_inet.setter\n def is_connected_to_inet(self, value):\n \"\"\"\n Set the current internet connection status.\n\n Returns:\n None\n\n Example:\n instance.is_connected_to_inet = connection_status\n \"\"\"\n self._is_connected_to_inet = value\n\n def system_reboot(self):\n \"\"\"\n Reboot the system after a 2-second delay.\n \"\"\"\n logger.info(\"Rebooting in 2 seconds...\")\n time.sleep(2)\n try:\n subprocess.run([\"reboot\"], stdout=subprocess.PIPE, text=True, check=True)\n except Exception as e:\n logger.error(f\"Error rebooting: {e}\")\n\n def system_update(self):\n \"\"\"\n Update the system through git.\n \"\"\"\n logger.info(\"Git update code and restart...\")\n try:\n subprocess.run([\"/usr/bin/git\", \"pull\"], stdout=subprocess.PIPE, text=True, check=True)\n os.kill(os.getpid(), signal.SIGTERM)\n except Exception as e:\n logger.error(f\"Error updating git: {e}\")\n\n def checking_for_duplicate_ssids(self, ssid, ap_array):\n \"\"\"\n Check for duplicate SSIDs in the list of WiFi networks.\n\n Args:\n ssid (str): The SSID to check.\n ap_array (list): The list of WiFi networks.\n\n Returns:\n bool: True if a duplicate is found, False otherwise.\n\n Example:\n is_duplicate = instance.checking_for_duplicate_ssids(\"MyWiFi\", wifi_networks)\n \"\"\"\n for wifi in ap_array:\n if wifi[\"ssid\"] == ssid:\n return True\n return False\n\n def scan_rpi_wifi_networks(self, refresh=False):\n \"\"\"\n Scan for available WiFi networks and update the instance's _ap_array attribute.\n\n Args:\n refresh (bool): If True, force a refresh of the WiFi networks list.\n\n Returns:\n list: The updated list of WiFi networks.\n\n Example:\n wifi_networks = instance.scan_rpi_wifi_networks()\n \"\"\"\n self._ap_array = []\n index = 0\n if not os.path.exists(NETWORKS_FILE):\n refresh = True\n if refresh:\n if ARCH == \"arm\":\n with subprocess.Popen([\"iwlist\", \"scan\"], stdout=subprocess.PIPE) as iwlist_raw:\n ap_list, err = iwlist_raw.communicate()\n if err is not None:\n logger.error(f\"Popen error: {err}\")\n return self._ap_array\n logger.debug(f\"iwlist scan command output: {ap_list}\")\n for line in ap_list.decode(\"utf-8\").rsplit(\"\\n\"):\n logger.debug(f\"Line: {line}\")\n if \"ESSID\" in line:\n ap_ssid = line[27:-1]\n if ap_ssid != \"\" and not self.checking_for_duplicate_ssids(ap_ssid, self._ap_array):\n index += 1\n logger.info(f\"id = {index}, ssid = {ap_ssid}\")\n wifi_network = {\"id\": index, \"ssid\": str(ap_ssid)}\n self._ap_array.append(json.loads(json.dumps(wifi_network)))\n self.store_wifi_networks_to_file()\n else:\n self._ap_array = []\n else:\n self.load_wifi_networks_from_file()\n\n return self._ap_array\n\n def store_wpa_ssid_key(self, ssid, wifi_key):\n \"\"\"\n Store the WPA SSID and key, and update the WPA supplicant configuration.\n\n Args:\n ssid (str): The SSID of the WiFi network.\n wifi_key (str): The key/password of the WiFi network.\n\n Returns:\n bool: True if the update is successful, False otherwise.\n\n Example:\n success = instance.store_wpa_ssid_key(\"MyWiFi\", \"MyPassword\")\n \"\"\"\n try:\n logger.info(f\"ssid: {ssid}, wifi_key: {wifi_key}\")\n return self.update_wpa_supplicant(ssid, wifi_key)\n except Exception as exception:\n logger.error(f\"Error: {exception}\")\n raise\n\n def is_raspberry_pi_zero(self):\n \"\"\"\n Check whether we're hosted in an RPi Zero or not.\n \"\"\"\n 
try:\n with open(\"/proc/cpuinfo\", encoding=\"utf8\") as cpuinfo:\n for line in cpuinfo:\n if line.startswith(\"Model\"):\n model_info = line.strip().split(\":\")\n model_name = model_info[1].strip()\n return \"Raspberry Pi Zero\" in model_name\n return False\n except FileNotFoundError as fnfex:\n logger.error(f\"Error: {fnfex}\")\n return False\n\n def write_wpa_supplicant(self, ssid, wifi_key):\n \"\"\"\n Write the WPA supplicant configuration to a temporary file.\n\n Args:\n ssid (str): The SSID of the WiFi network.\n wifi_key (str): The key/password of the WiFi network.\n \"\"\"\n with open(WPA_SUPL_CONF_TMP, \"w\", encoding=\"utf8\") as temp_conf_file:\n temp_conf_file.write(\"ctrl_interface=DIR=/var/run/wpa_supplicant GROUP=netdev\\n\")\n temp_conf_file.write(\"update_config=1\\n\")\n temp_conf_file.write(\"\\n\")\n temp_conf_file.write(\"network={\\n\")\n temp_conf_file.write('\tssid=\"' + str(ssid) + '\"\\n')\n if wifi_key == \"\":\n temp_conf_file.write(\"\tkey_mgmt=NONE\\n\")\n else:\n temp_conf_file.write('\tpsk=\"' + str(wifi_key) + '\"\\n')\n temp_conf_file.write(\"}\\n\")\n temp_conf_file.close()\n\n def get_wireless_interface(self):\n \"\"\"\n Get the wireless interface name of the device.\n\n Returns:\n str: The wireless interface name.\n\n Example:\n interface_name = instance.get_wireless_interface()\n \"\"\"\n try:\n ifconfig_output = subprocess.check_output([\"ifconfig\"]).decode(\"utf-8\")\n wireless_interfaces = re.findall(r\"wlan[0-9]+\", ifconfig_output)\n if wireless_interfaces:\n return wireless_interfaces[0]\n except subprocess.CalledProcessError as ex:\n logger.error(f\"Error: {ex}\")\n raise\n return None\n\n def update_wpa_supplicant(self, ssid, wifi_key):\n \"\"\"\n Update the WPA supplicant configuration and check for internet connectivity.\n\n Args:\n ssid (str): The SSID of the WiFi network.\n wifi_key (str): The key/password of the WiFi network.\n\n Returns:\n bool: True if connected to the internet after the update, False otherwise.\n\n Example:\n connected = instance.update_wpa_supplicant(\"MyWiFi\", \"MyPassword\")\n \"\"\"\n try:\n self._is_connected_to_inet = False\n if RUNNING_UNIT_TESTS and ssid == DUMMY_SSID and wifi_key == DUMMY_PASSKEY:\n return True\n # In case of Raspberry Pi Zero NetworkManager stucks. 
So let's go with the wap_supplicant\n # modification approach.\n if self.is_raspberry_pi_zero():\n self.write_wpa_supplicant(ssid, wifi_key)\n os.system(\n \"cp /etc/wpa_supplicant/wpa_supplicant.conf \\\n /etc/wpa_supplicant/wpa_supplicant.conf.bak\"\n )\n os.system(\"cp \" + WPA_SUPL_CONF_TMP + \" /etc/wpa_supplicant/wpa_supplicant.conf\")\n wpa_cli_cmd = \"sudo wpa_cli -i wlan0 reconfigure\"\n output = subprocess.check_output(wpa_cli_cmd, shell=True)\n logger.info(f\"Output of command {wpa_cli_cmd}:{output.decode('utf8')}\")\n else:\n wpa_cli_cmd = f\"sudo nmcli device wifi connect {ssid} password {wifi_key}\"\n output = subprocess.check_output(wpa_cli_cmd, shell=True)\n logger.info(f\"Output of command `{wpa_cli_cmd}:{output.decode('utf8')}`\")\n\n wireless_interface = self.get_wireless_interface()\n logger.info(f\"wireless_interface `{wireless_interface}`\")\n wpa_cli_cmd = f\"wpa_cli -i {wireless_interface} status | grep state | cut -d'=' -f2\"\n logger.info(f\"Command to run: `{wpa_cli_cmd}`\")\n retries = 0\n while retries < 30:\n retries = retries + 1\n output = subprocess.check_output(wpa_cli_cmd, shell=True)\n logger.info(f\"Output of command `{wpa_cli_cmd}`:{output.decode('utf8')}\")\n if str(output.decode(\"utf8\")) == \"COMPLETED\\n\":\n self._is_connected_to_inet = True\n else:\n time.sleep(2)\n\n logger.info(f\"Connected to internet: {self._is_connected_to_inet}\")\n return self._is_connected_to_inet\n except Exception as exception:\n logger.error(f\"Error: {exception}\")\n raise\n\n def sleep_and_reboot_for_wpa(self):\n \"\"\"\n Sleep for a short period and then reboot the system.\n \"\"\"\n self.system_reboot()" }, { "identifier": "Command", "path": "app/raspi/const.py", "snippet": "class Command(Enum):\n \"\"\"Supported Commands Enumerator.\"\"\"\n\n TURN_OFF_VALVE = 0\n TURN_ON_VALVE = 1\n SEND_PROGRAM = 2\n SEND_TIMEZONE = 3\n REBOOT_RPI = 4\n DELETE_PROGRAM = 5\n UPDATE_RPI = 6" } ]
import time
import os
import json
import threading
import sys

import paho.mqtt.client as mqtt
from threading import Thread
from loguru import logger

from app.raspi.services import Services
from app.raspi.const import (
    MQTT_CLIENT_ID,
    MQTT_TOPIC_STATUS,
    MQTT_TOPIC_METADATA,
    MQTT_TOPIC_CONFIG,
    MQTT_TOPIC_CMD,
    MQTT_TOPIC_VALVES,
    MQTT_STATUS_ERR,
    PROGRAM,
    PROGRAM_EXT,
    MQTT_STATUS_OK,
    MQTT_OK,
    MQTT_END,
    MQTT_USER,
    MQTT_PASS,
    MQTT_HOST,
    MQTT_PORT,
)
from app.raspi.helpers import Helpers
from app.raspi.const import Command
11,320
Inputs: None Outputs: None """ logger.debug(f"Destroying Mqtt Object Class: {cls.__instance}") cls.__instance = None cls._mqtt_thread = None cls._periodic_updates_thread = None def get_mqtt_thread(self): """Getter.""" logger.debug(f"Getting current thread: {self._mqtt_thread}") return self._mqtt_thread def set_mqtt_thread(self, mqtt_thread): """Setter.""" logger.debug(f"Setting new thread: {mqtt_thread}") self._mqtt_thread = mqtt_thread def get_periodic_updates_thread(self): """Getter.""" return self._periodic_updates_thread def set_periodic_updates_thread(self, periodic_updates_thread): """Setter.""" self._periodic_updates_thread = periodic_updates_thread def is_running(self): """Check whether mqtt thread state.""" # logger.info(str(mqtt_thread)) # logger.info(str(mqtt_thread is not None)) # logger.info(str(mqtt_thread.is_alive())) return self._mqtt_thread is not None and self._mqtt_thread.is_alive() @staticmethod def on_disconnect(client, data, return_code=0): """OnDisconnect callback.""" logger.debug(f"MQTT OnDisconnect: {client}:{data}:{return_code}") # The callback for when the client # receives a CONNACK response from the server. @staticmethod def on_connect(client, userdata, flags, return_code): """OnConnect callback.""" logger.debug(f"MQTT OnConnect: {client}:{userdata}:{flags}:{return_code}") client.connected_flag = True # subscribe to the RASPIRRI TOPICS logger.debug( f"MQTT OnConnect: Subscribing to topics:\ {MQTT_TOPIC_STATUS},\ {MQTT_TOPIC_CONFIG},\ {MQTT_TOPIC_CMD},\ {MQTT_TOPIC_VALVES}" ) client.subscribe(MQTT_TOPIC_STATUS) client.subscribe(MQTT_TOPIC_CONFIG) client.subscribe(MQTT_TOPIC_CMD) client.subscribe(MQTT_TOPIC_VALVES) if return_code == 0: logger.info("Connected successfully") Helpers().load_toggle_statuses_from_file() if Mqtt().get_periodic_updates_thread() is None: Mqtt().set_periodic_updates_thread( Thread(daemon=True, name="PeriodicUpdatesThread", target=Mqtt.send_periodic_updates, args=(client,)) ) Mqtt().get_periodic_updates_thread().start() else: logger.info(f"Connect returned result code: {return_code}") @staticmethod def handle_valves(client, data): """Handle valves.""" try: logger.info(f"valves data received={data}") Helpers().set_valves(data) except Exception as exception: logger.error(f"Error: {exception}") Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, MQTT_STATUS_ERR + str(exception)[0:128] + MQTT_END) # Program Configuration handler # 1. It should parse the configuration as a JSON string # 2. If it is correct it should store it as a local file # 3. A scheduler should launch to turn on the irrigator for every cycle @staticmethod def handle_config(client, data): """Handle cfg.""" try: json_data = json.loads(data) logger.info(f"prestored programs={json_data}") for program in json_data: logger.info(f"program={program}") if program == {}: Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, MQTT_STATUS_OK + MQTT_OK + MQTT_END) return Services().store_program_cycles(program, True) Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, MQTT_STATUS_OK + MQTT_OK + MQTT_END) except Exception as exception: logger.error(f"Error: {exception}") Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, MQTT_STATUS_ERR + str(exception)[0:128] + MQTT_END) @staticmethod def handle_command(client, data): """Handle cmd.""" try: json_data = json.loads(data) logger.info(json_data) cmd = json_data["cmd"] command = Command(cmd) try: valve = json_data["out"] except Exception as exception: logger.warning( f"Could not find valve out parameter. \ Will use valve 1: {exception}" ) valve = 1
"""MIT License Copyright (c) 2023, Marios Karagiannopoulos Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. **Attribution Requirement:** When using or distributing the software, an attribution to Marios Karagiannopoulos must be included. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ class Mqtt: """MQTT Methods Class.""" __instance = None __lock = threading.Lock() client = None def __new__(cls): """ Create a new instance of the Mqtt class using the singleton design pattern. Returns: An instance of the Mqtt class. Example Usage: instance = Mqtt() """ if cls.__instance is None: with cls.__lock: cls.__instance = super().__new__(cls) # pylint: disable=duplicate-code cls._mqtt_thread = None cls._periodic_updates_thread = None logger.debug(f"Returning Mqtt Object Class: {cls.__instance}") return cls.__instance @classmethod def destroy_instance(cls): """ Destroy the instance of the Mqtt class. This method sets the instance of the Mqtt class to None, effectively destroying the instance. Example Usage: ```python instance = Mqtt() # Create an instance of the Mqtt class Mqtt.destroy_instance() # Destroy the instance print(instance) # Output: None ``` Inputs: None Outputs: None """ logger.debug(f"Destroying Mqtt Object Class: {cls.__instance}") cls.__instance = None cls._mqtt_thread = None cls._periodic_updates_thread = None def get_mqtt_thread(self): """Getter.""" logger.debug(f"Getting current thread: {self._mqtt_thread}") return self._mqtt_thread def set_mqtt_thread(self, mqtt_thread): """Setter.""" logger.debug(f"Setting new thread: {mqtt_thread}") self._mqtt_thread = mqtt_thread def get_periodic_updates_thread(self): """Getter.""" return self._periodic_updates_thread def set_periodic_updates_thread(self, periodic_updates_thread): """Setter.""" self._periodic_updates_thread = periodic_updates_thread def is_running(self): """Check whether mqtt thread state.""" # logger.info(str(mqtt_thread)) # logger.info(str(mqtt_thread is not None)) # logger.info(str(mqtt_thread.is_alive())) return self._mqtt_thread is not None and self._mqtt_thread.is_alive() @staticmethod def on_disconnect(client, data, return_code=0): """OnDisconnect callback.""" logger.debug(f"MQTT OnDisconnect: {client}:{data}:{return_code}") # The callback for when the client # receives a CONNACK response from the server. 
@staticmethod def on_connect(client, userdata, flags, return_code): """OnConnect callback.""" logger.debug(f"MQTT OnConnect: {client}:{userdata}:{flags}:{return_code}") client.connected_flag = True # subscribe to the RASPIRRI TOPICS logger.debug( f"MQTT OnConnect: Subscribing to topics:\ {MQTT_TOPIC_STATUS},\ {MQTT_TOPIC_CONFIG},\ {MQTT_TOPIC_CMD},\ {MQTT_TOPIC_VALVES}" ) client.subscribe(MQTT_TOPIC_STATUS) client.subscribe(MQTT_TOPIC_CONFIG) client.subscribe(MQTT_TOPIC_CMD) client.subscribe(MQTT_TOPIC_VALVES) if return_code == 0: logger.info("Connected successfully") Helpers().load_toggle_statuses_from_file() if Mqtt().get_periodic_updates_thread() is None: Mqtt().set_periodic_updates_thread( Thread(daemon=True, name="PeriodicUpdatesThread", target=Mqtt.send_periodic_updates, args=(client,)) ) Mqtt().get_periodic_updates_thread().start() else: logger.info(f"Connect returned result code: {return_code}") @staticmethod def handle_valves(client, data): """Handle valves.""" try: logger.info(f"valves data received={data}") Helpers().set_valves(data) except Exception as exception: logger.error(f"Error: {exception}") Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, MQTT_STATUS_ERR + str(exception)[0:128] + MQTT_END) # Program Configuration handler # 1. It should parse the configuration as a JSON string # 2. If it is correct it should store it as a local file # 3. A scheduler should launch to turn on the irrigator for every cycle @staticmethod def handle_config(client, data): """Handle cfg.""" try: json_data = json.loads(data) logger.info(f"prestored programs={json_data}") for program in json_data: logger.info(f"program={program}") if program == {}: Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, MQTT_STATUS_OK + MQTT_OK + MQTT_END) return Services().store_program_cycles(program, True) Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, MQTT_STATUS_OK + MQTT_OK + MQTT_END) except Exception as exception: logger.error(f"Error: {exception}") Mqtt.publish_to_topic(client, MQTT_TOPIC_STATUS, MQTT_STATUS_ERR + str(exception)[0:128] + MQTT_END) @staticmethod def handle_command(client, data): """Handle cmd.""" try: json_data = json.loads(data) logger.info(json_data) cmd = json_data["cmd"] command = Command(cmd) try: valve = json_data["out"] except Exception as exception: logger.warning( f"Could not find valve out parameter. \ Will use valve 1: {exception}" ) valve = 1
file_path = PROGRAM + str(valve) + PROGRAM_EXT
9
2023-12-22 08:06:09+00:00
16k
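One plausible way the fields of a record like the one above fit together for next-line prediction is sketched below: the snippet selected by gold_snippet_index is taken from the context list, combined with import_statement and cropped_code, and next_line is the completion target. This assembly is an assumption inferred from the column names, not a recipe documented here.

# A sketch of one possible way to assemble a prompt from a single record.
# The field usage (gold_snippet_index selecting from "context") is assumed.
def build_prompt(record: dict) -> tuple[str, str]:
    """Return (prompt, target) for a next-line completion example."""
    gold = record["context"][record["gold_snippet_index"]]
    prompt = (
        f'# snippet from {gold["path"]} ({gold["identifier"]})\n'
        + gold["snippet"]
        + "\n\n"
        + record["import_statement"]
        + "\n"
        + record["cropped_code"]
        + "\n"
    )
    return prompt, record["next_line"]

# Toy record with placeholder values, shaped like the row above.
toy = {
    "context": [
        {"identifier": "Services", "path": "app/raspi/services.py", "snippet": "class Services: ..."}
    ],
    "gold_snippet_index": 0,
    "import_statement": "from app.raspi.services import Services",
    "cropped_code": "def handle_command(client, data):",
    "next_line": "file_path = PROGRAM + str(valve) + PROGRAM_EXT",
}
prompt, target = build_prompt(toy)
print(prompt)
print("target:", target)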
shibing624/chatgpt-webui
src/models.py
[ { "identifier": "shared", "path": "src/shared.py", "snippet": "class State:\n def interrupt(self):\n def recover(self):\n def set_api_host(self, api_host: str):\n def reset_api_host(self):\n def reset_all(self):\n def set_api_key_queue(self, api_key_list):\n def switching_api_key(self, func):\n def wrapped(*args, **kwargs):" }, { "identifier": "config", "path": "src/config.py", "snippet": "def retrieve_openai_api(api_key=None):\ndef retrieve_proxy(proxy=None):\ndef update_doc_config(two_column_pdf):" }, { "identifier": "BaseLLMModel", "path": "src/base_model.py", "snippet": "class BaseLLMModel:\n def __init__(\n self,\n model_name,\n system_prompt=INITIAL_SYSTEM_PROMPT,\n temperature=1.0,\n top_p=1.0,\n n_choices=1,\n stop=\"\",\n max_generation_token=None,\n presence_penalty=0,\n frequency_penalty=0,\n logit_bias=None,\n user=\"\",\n single_turn=False,\n ) -> None:\n self.history = []\n self.all_token_counts = []\n self.model_name = model_name\n self.model_type = ModelType.get_type(model_name)\n try:\n self.token_upper_limit = MODEL_TOKEN_LIMIT[model_name]\n except KeyError:\n self.token_upper_limit = DEFAULT_TOKEN_LIMIT\n self.interrupted = False\n self.system_prompt = system_prompt\n self.api_key = None\n self.need_api_key = False\n self.history_file_path = get_first_history_name(user)\n self.user_name = user\n self.chatbot = []\n\n self.default_single_turn = single_turn\n self.default_temperature = temperature\n self.default_top_p = top_p\n self.default_n_choices = n_choices\n self.default_stop_sequence = stop\n self.default_max_generation_token = max_generation_token\n self.default_presence_penalty = presence_penalty\n self.default_frequency_penalty = frequency_penalty\n self.default_logit_bias = logit_bias\n self.default_user_identifier = user\n\n self.single_turn = single_turn\n self.temperature = temperature\n self.top_p = top_p\n self.n_choices = n_choices\n self.stop_sequence = stop\n self.max_generation_token = max_generation_token\n self.presence_penalty = presence_penalty\n self.frequency_penalty = frequency_penalty\n self.logit_bias = logit_bias\n self.user_identifier = user\n\n self.metadata = {}\n\n def get_answer_stream_iter(self):\n \"\"\"stream predict, need to be implemented\n conversations are stored in self.history, with the most recent question, in OpenAI format\n should return a generator, each time give the next word (str) in the answer\n \"\"\"\n logger.warning(\"stream predict not implemented, using at once predict instead\")\n response, _ = self.get_answer_at_once()\n yield response\n\n def get_answer_at_once(self):\n \"\"\"predict at once, need to be implemented\n conversations are stored in history, with the most recent question, in OpenAI format\n Should return:\n the answer (str)\n total token count (int)\n \"\"\"\n logger.warning(\"at once predict not implemented, using stream predict instead\")\n response_iter = self.get_answer_stream_iter()\n count = 0\n response = ''\n for response in response_iter:\n count += 1\n return response, sum(self.all_token_counts) + count\n\n def billing_info(self):\n \"\"\"get billing infomation, inplement if needed\"\"\"\n return BILLING_NOT_APPLICABLE_MSG\n\n def count_token(self, user_input):\n \"\"\"get token count from input, implement if needed\"\"\"\n return len(user_input)\n\n def stream_next_chatbot(self, inputs, chatbot, fake_input=None, display_append=\"\"):\n def get_return_value():\n return chatbot, status_text\n\n status_text = i18n(\"开始实时传输回答……\")\n if fake_input:\n chatbot.append((fake_input, \"\"))\n else:\n 
chatbot.append((inputs, \"\"))\n\n user_token_count = self.count_token(inputs)\n self.all_token_counts.append(user_token_count)\n logger.debug(f\"输入token计数: {user_token_count}\")\n\n stream_iter = self.get_answer_stream_iter()\n\n if display_append:\n display_append = (\n '\\n\\n<hr class=\"append-display no-in-raw\" />' + display_append\n )\n\n partial_text = \"\"\n token_increment = 1\n for partial_text in stream_iter:\n if type(partial_text) == tuple:\n partial_text, token_increment = partial_text\n chatbot[-1] = (chatbot[-1][0], partial_text + display_append)\n self.all_token_counts[-1] += token_increment\n status_text = self.token_message()\n yield get_return_value()\n if self.interrupted:\n self.recover()\n break\n self.history.append(construct_assistant(partial_text))\n\n def next_chatbot_at_once(self, inputs, chatbot, fake_input=None, display_append=\"\"):\n if fake_input:\n chatbot.append((fake_input, \"\"))\n else:\n chatbot.append((inputs, \"\"))\n if fake_input is not None:\n user_token_count = self.count_token(fake_input)\n else:\n user_token_count = self.count_token(inputs)\n self.all_token_counts.append(user_token_count)\n ai_reply, total_token_count = self.get_answer_at_once()\n self.history.append(construct_assistant(ai_reply))\n if fake_input is not None:\n self.history[-2] = construct_user(fake_input)\n chatbot[-1] = (chatbot[-1][0], ai_reply + display_append)\n if fake_input is not None:\n self.all_token_counts[-1] += count_token(construct_assistant(ai_reply))\n else:\n self.all_token_counts[-1] = total_token_count - sum(self.all_token_counts)\n status_text = self.token_message()\n return chatbot, status_text\n\n def handle_file_upload(self, files, chatbot, language):\n \"\"\"if the model accepts modal input, implement this function\"\"\"\n status = gr.Markdown.update()\n if files:\n construct_index(self.api_key, files=files)\n status = i18n(\"索引构建完成\")\n return gr.Files.update(), chatbot, status\n\n def prepare_inputs(\n self, real_inputs, use_websearch,\n files, reply_language, chatbot,\n load_from_cache_if_possible=True,\n ):\n display_append = []\n limited_context = False\n if type(real_inputs) == list:\n fake_inputs = real_inputs[0][\"text\"]\n else:\n fake_inputs = real_inputs\n if files:\n from langchain.vectorstores.base import VectorStoreRetriever\n from langchain.retrievers import BM25Retriever, EnsembleRetriever\n limited_context = True\n msg = \"加载索引中……\"\n logger.info(msg)\n index, documents = construct_index(\n self.api_key,\n files=files,\n load_from_cache_if_possible=load_from_cache_if_possible,\n )\n assert index is not None, \"获取索引失败\"\n msg = \"索引获取成功,生成回答中……\"\n logger.info(msg)\n k = 3\n score_threshold = 0.6\n with retrieve_proxy():\n vec_retriever = VectorStoreRetriever(\n vectorstore=index,\n search_type=\"similarity_score_threshold\",\n search_kwargs={\"k\": k, \"score_threshold\": score_threshold}\n )\n bm25_retriever = BM25Retriever.from_documents(documents, preprocess_func=chinese_preprocessing_func)\n bm25_retriever.k = k\n ensemble_retriever = EnsembleRetriever(\n retrievers=[bm25_retriever, vec_retriever],\n weights=[0.5, 0.5],\n )\n try:\n relevant_documents = ensemble_retriever.get_relevant_documents(fake_inputs)\n except:\n return self.prepare_inputs(\n fake_inputs,\n use_websearch,\n files,\n reply_language,\n chatbot,\n load_from_cache_if_possible=False,\n )\n reference_results = [\n [d.page_content.strip(\"�\"), os.path.basename(d.metadata[\"source\"])]\n for d in relevant_documents\n ]\n reference_results = 
add_source_numbers(reference_results)\n display_append = add_details(reference_results)\n display_append = \"\\n\\n\" + \"\".join(display_append)\n if type(real_inputs) == list:\n real_inputs[0][\"text\"] = (\n replace_today(PROMPT_TEMPLATE)\n .replace(\"{query_str}\", fake_inputs)\n .replace(\"{context_str}\", \"\\n\\n\".join(reference_results))\n .replace(\"{reply_language}\", reply_language)\n )\n else:\n real_inputs = (\n replace_today(PROMPT_TEMPLATE)\n .replace(\"{query_str}\", real_inputs)\n .replace(\"{context_str}\", \"\\n\\n\".join(reference_results))\n .replace(\"{reply_language}\", reply_language)\n )\n elif use_websearch:\n from duckduckgo_search import DDGS\n search_results = []\n with DDGS() as ddgs:\n ddgs_gen = ddgs.text(fake_inputs, backend=\"lite\")\n for r in islice(ddgs_gen, 10):\n search_results.append(r)\n reference_results = []\n for idx, result in enumerate(search_results):\n logger.debug(f\"搜索结果{idx + 1}:{result}\")\n domain_name = urllib3.util.parse_url(result[\"href\"]).host\n reference_results.append([result[\"body\"], result[\"href\"]])\n display_append.append(\n # f\"{idx+1}. [{domain_name}]({result['href']})\\n\"\n f\"<a href=\\\"{result['href']}\\\" target=\\\"_blank\\\">{idx + 1}.&nbsp;{result['title']}</a>\"\n )\n reference_results = add_source_numbers(reference_results)\n # display_append = \"<ol>\\n\\n\" + \"\".join(display_append) + \"</ol>\"\n display_append = (\n '<div class = \"source-a\">' + \"\".join(display_append) + \"</div>\"\n )\n if type(real_inputs) == list:\n real_inputs[0][\"text\"] = (\n replace_today(WEBSEARCH_PTOMPT_TEMPLATE)\n .replace(\"{query}\", fake_inputs)\n .replace(\"{web_results}\", \"\\n\\n\".join(reference_results))\n .replace(\"{reply_language}\", reply_language)\n )\n else:\n real_inputs = (\n replace_today(WEBSEARCH_PTOMPT_TEMPLATE)\n .replace(\"{query}\", fake_inputs)\n .replace(\"{web_results}\", \"\\n\\n\".join(reference_results))\n .replace(\"{reply_language}\", reply_language)\n )\n else:\n display_append = \"\"\n return limited_context, fake_inputs, display_append, real_inputs, chatbot\n\n def predict(\n self,\n inputs,\n chatbot,\n stream=False,\n use_websearch=False,\n files=None,\n reply_language=\"中文\",\n should_check_token_count=True,\n ): # repetition_penalty, top_k\n\n status_text = \"开始生成回答……\"\n if type(inputs) == list:\n logger.info(\n \"用户\"\n + f\"{self.user_name}\"\n + \"的输入为:\"\n + \"(\"\n + str(len(inputs) - 1)\n + \" images) \"\n + f\"{inputs[0]['text']}\"\n )\n else:\n logger.info(\n \"用户\"\n + f\"{self.user_name}\"\n + \"的输入为:\"\n + f\"{inputs}\"\n )\n if should_check_token_count:\n if type(inputs) == list:\n yield chatbot + [(inputs[0][\"text\"], \"\")], status_text\n else:\n yield chatbot + [(inputs, \"\")], status_text\n if reply_language == \"跟随问题语言(不稳定)\":\n reply_language = \"the same language as the question, such as English, 中文, 日本語, Español, Français, or Deutsch.\"\n\n limited_context, fake_inputs, display_append, inputs, chatbot = self.prepare_inputs(\n real_inputs=inputs,\n use_websearch=use_websearch,\n files=files,\n reply_language=reply_language,\n chatbot=chatbot\n )\n yield chatbot + [(fake_inputs, \"\")], status_text\n\n if (\n self.need_api_key and\n self.api_key is None\n and not shared.state.multi_api_key\n ):\n status_text = STANDARD_ERROR_MSG + NO_APIKEY_MSG\n logger.info(status_text)\n chatbot.append((inputs, \"\"))\n if len(self.history) == 0:\n self.history.append(construct_user(inputs))\n self.history.append(\"\")\n self.all_token_counts.append(0)\n else:\n self.history[-2] 
= construct_user(inputs)\n yield chatbot + [(inputs, \"\")], status_text\n return\n elif len(inputs.strip()) == 0:\n status_text = STANDARD_ERROR_MSG + NO_INPUT_MSG\n logger.info(status_text)\n yield chatbot + [(inputs, \"\")], status_text\n return\n\n if self.single_turn:\n self.history = []\n self.all_token_counts = []\n if type(inputs) == list:\n self.history.append(inputs)\n else:\n self.history.append(construct_user(inputs))\n\n try:\n if stream:\n logger.debug(\"使用流式传输\")\n iter = self.stream_next_chatbot(\n inputs,\n chatbot,\n fake_input=fake_inputs,\n display_append=display_append,\n )\n for chatbot, status_text in iter:\n yield chatbot, status_text\n else:\n logger.debug(\"不使用流式传输\")\n chatbot, status_text = self.next_chatbot_at_once(\n inputs,\n chatbot,\n fake_input=fake_inputs,\n display_append=display_append,\n )\n yield chatbot, status_text\n except Exception as e:\n traceback.print_exc()\n status_text = STANDARD_ERROR_MSG + str(e)\n yield chatbot, status_text\n\n if len(self.history) > 1 and self.history[-1][\"content\"] != inputs:\n logger.info(\"回答为:\" + f\"{self.history[-1]['content']}\")\n\n if limited_context:\n self.history = []\n self.all_token_counts = []\n\n max_token = self.token_upper_limit - TOKEN_OFFSET\n\n if sum(self.all_token_counts) > max_token and should_check_token_count:\n count = 0\n while (\n sum(self.all_token_counts)\n > self.token_upper_limit * REDUCE_TOKEN_FACTOR\n and sum(self.all_token_counts) > 0\n ):\n count += 1\n del self.all_token_counts[0]\n del self.history[:2]\n logger.info(status_text)\n status_text = f\"为了防止token超限,模型忘记了早期的 {count} 轮对话\"\n yield chatbot, status_text\n\n def retry(\n self,\n chatbot,\n stream=False,\n use_websearch=False,\n files=None,\n reply_language=\"中文\",\n ):\n logger.debug(\"重试中……\")\n if len(self.history) > 1:\n inputs = self.history[-2][\"content\"]\n del self.history[-2:]\n if len(self.all_token_counts) > 0:\n self.all_token_counts.pop()\n elif len(chatbot) > 0:\n inputs = chatbot[-1][0]\n if '<div class=\"user-message\">' in inputs:\n inputs = inputs.split('<div class=\"user-message\">')[1]\n inputs = inputs.split(\"</div>\")[0]\n elif len(self.history) == 1:\n inputs = self.history[-1][\"content\"]\n del self.history[-1]\n else:\n yield chatbot, f\"{STANDARD_ERROR_MSG}上下文是空的\"\n return\n\n iter = self.predict(\n inputs,\n chatbot,\n stream=stream,\n use_websearch=use_websearch,\n files=files,\n reply_language=reply_language,\n )\n for x in iter:\n yield x\n logger.debug(\"重试完毕\")\n\n def interrupt(self):\n self.interrupted = True\n\n def recover(self):\n self.interrupted = False\n\n def set_token_upper_limit(self, new_upper_limit):\n self.token_upper_limit = new_upper_limit\n logger.info(f\"token上限设置为{new_upper_limit}\")\n self.auto_save()\n\n def set_temperature(self, new_temperature):\n self.temperature = new_temperature\n self.auto_save()\n\n def set_top_p(self, new_top_p):\n self.top_p = new_top_p\n self.auto_save()\n\n def set_n_choices(self, new_n_choices):\n self.n_choices = new_n_choices\n self.auto_save()\n\n def set_stop_sequence(self, new_stop_sequence: str):\n new_stop_sequence = new_stop_sequence.split(\",\")\n self.stop_sequence = new_stop_sequence\n self.auto_save()\n\n def set_max_tokens(self, new_max_tokens):\n self.max_generation_token = new_max_tokens\n self.auto_save()\n\n def set_presence_penalty(self, new_presence_penalty):\n self.presence_penalty = new_presence_penalty\n self.auto_save()\n\n def set_frequency_penalty(self, new_frequency_penalty):\n self.frequency_penalty = 
new_frequency_penalty\n self.auto_save()\n\n def set_logit_bias(self, logit_bias):\n self.logit_bias = logit_bias\n self.auto_save()\n\n def encoded_logit_bias(self):\n if self.logit_bias is None:\n return {}\n logit_bias = self.logit_bias.split()\n bias_map = {}\n encoding = tiktoken.get_encoding(\"cl100k_base\")\n for line in logit_bias:\n word, bias_amount = line.split(\":\")\n if word:\n for token in encoding.encode(word):\n bias_map[token] = float(bias_amount)\n return bias_map\n\n def set_user_identifier(self, new_user_identifier):\n self.user_identifier = new_user_identifier\n self.auto_save()\n\n def set_system_prompt(self, new_system_prompt):\n self.system_prompt = new_system_prompt\n self.auto_save()\n\n def set_key(self, new_access_key):\n self.api_key = new_access_key.strip()\n msg = i18n(\"API密钥更改为了\") + hide_middle_chars(self.api_key)\n logger.info(msg)\n return self.api_key, msg\n\n def set_single_turn(self, new_single_turn):\n self.single_turn = new_single_turn\n self.auto_save()\n\n def reset(self, remain_system_prompt=False):\n self.history = []\n self.all_token_counts = []\n self.interrupted = False\n self.history_file_path = new_auto_history_filename(self.user_name)\n history_name = self.history_file_path[:-5]\n choices = [history_name] + get_history_names(self.user_name)\n system_prompt = self.system_prompt if remain_system_prompt else \"\"\n\n self.single_turn = self.default_single_turn\n self.temperature = self.default_temperature\n self.top_p = self.default_top_p\n self.n_choices = self.default_n_choices\n self.stop_sequence = self.default_stop_sequence\n self.max_generation_token = self.default_max_generation_token\n self.presence_penalty = self.default_presence_penalty\n self.frequency_penalty = self.default_frequency_penalty\n self.logit_bias = self.default_logit_bias\n self.user_identifier = self.default_user_identifier\n\n return (\n [],\n self.token_message([0]),\n gr.Radio.update(choices=choices, value=history_name),\n system_prompt,\n self.single_turn,\n self.temperature,\n self.top_p,\n self.n_choices,\n self.stop_sequence,\n self.token_upper_limit,\n self.max_generation_token,\n self.presence_penalty,\n self.frequency_penalty,\n self.logit_bias,\n self.user_identifier,\n )\n\n def delete_first_conversation(self):\n if self.history:\n del self.history[:2]\n del self.all_token_counts[0]\n return self.token_message()\n\n def delete_last_conversation(self, chatbot):\n if len(chatbot) > 0 and STANDARD_ERROR_MSG in chatbot[-1][1]:\n msg = \"由于包含报错信息,只删除chatbot记录\"\n chatbot = chatbot[:-1]\n return chatbot, self.history\n if len(self.history) > 0:\n self.history = self.history[:-2]\n if len(chatbot) > 0:\n msg = \"删除了一组chatbot对话\"\n chatbot = chatbot[:-1]\n if len(self.all_token_counts) > 0:\n msg = \"删除了一组对话的token计数记录\"\n self.all_token_counts.pop()\n msg = \"删除了一组对话\"\n self.chatbot = chatbot\n self.auto_save(chatbot)\n return chatbot, msg\n\n def token_message(self, token_lst=None):\n if token_lst is None:\n token_lst = self.all_token_counts\n token_sum = 0\n for i in range(len(token_lst)):\n token_sum += sum(token_lst[: i + 1])\n return (\n i18n(\"Token 计数: \")\n + f\"{sum(token_lst)}\"\n + i18n(\",本次对话累计消耗了 \")\n + f\"{token_sum} tokens\"\n )\n\n def rename_chat_history(self, filename, chatbot):\n if filename == \"\":\n return gr.update()\n if not filename.endswith(\".json\"):\n filename += \".json\"\n self.delete_chat_history(self.history_file_path)\n # 命名重复检测\n repeat_file_index = 2\n full_path = os.path.join(HISTORY_DIR, self.user_name, filename)\n while 
os.path.exists(full_path):\n full_path = os.path.join(\n HISTORY_DIR, self.user_name, f\"{repeat_file_index}_{filename}\"\n )\n repeat_file_index += 1\n filename = os.path.basename(full_path)\n\n self.history_file_path = filename\n save_file(filename, self, chatbot)\n return init_history_list(self.user_name)\n\n def auto_name_chat_history(\n self, name_chat_method, user_question, chatbot, single_turn_checkbox\n ):\n if len(self.history) == 2 and not single_turn_checkbox:\n user_question = self.history[0][\"content\"]\n if type(user_question) == list:\n user_question = user_question[0][\"text\"]\n filename = replace_special_symbols(user_question)[:16] + \".json\"\n return self.rename_chat_history(filename, chatbot)\n else:\n return gr.update()\n\n def auto_save(self, chatbot=None):\n if chatbot is None:\n chatbot = self.chatbot\n save_file(self.history_file_path, self, chatbot)\n\n def export_markdown(self, filename, chatbot):\n if filename == \"\":\n return\n if not filename.endswith(\".md\"):\n filename += \".md\"\n save_file(filename, self, chatbot)\n\n def load_chat_history(self, new_history_file_path=None):\n logger.debug(f\"{self.user_name} 加载对话历史中……\")\n if new_history_file_path is not None:\n if type(new_history_file_path) != str:\n # copy file from new_history_file_path.name to os.path.join(HISTORY_DIR, self.user_name)\n new_history_file_path = new_history_file_path.name\n shutil.copyfile(\n new_history_file_path,\n os.path.join(\n HISTORY_DIR,\n self.user_name,\n os.path.basename(new_history_file_path),\n ),\n )\n self.history_file_path = os.path.basename(new_history_file_path)\n else:\n self.history_file_path = new_history_file_path\n try:\n if self.history_file_path == os.path.basename(self.history_file_path):\n history_file_path = os.path.join(\n HISTORY_DIR, self.user_name, self.history_file_path\n )\n else:\n history_file_path = self.history_file_path\n if not self.history_file_path.endswith(\".json\"):\n history_file_path += \".json\"\n saved_json = {}\n if os.path.exists(history_file_path):\n with open(history_file_path, \"r\", encoding=\"utf-8\") as f:\n saved_json = json.load(f)\n try:\n if type(saved_json[\"history\"][0]) == str:\n logger.info(\"历史记录格式为旧版,正在转换……\")\n new_history = []\n for index, item in enumerate(saved_json[\"history\"]):\n if index % 2 == 0:\n new_history.append(construct_user(item))\n else:\n new_history.append(construct_assistant(item))\n saved_json[\"history\"] = new_history\n logger.info(new_history)\n except:\n pass\n if len(saved_json[\"chatbot\"]) < len(saved_json[\"history\"]) // 2:\n logger.info(\"Trimming corrupted history...\")\n saved_json[\"history\"] = saved_json[\"history\"][-len(saved_json[\"chatbot\"]):]\n logger.info(f\"Trimmed history: {saved_json['history']}\")\n logger.debug(f\"{self.user_name} 加载对话历史完毕\")\n self.history = saved_json[\"history\"]\n self.single_turn = saved_json.get(\"single_turn\", self.single_turn)\n self.temperature = saved_json.get(\"temperature\", self.temperature)\n self.top_p = saved_json.get(\"top_p\", self.top_p)\n self.n_choices = saved_json.get(\"n_choices\", self.n_choices)\n self.stop_sequence = list(saved_json.get(\"stop_sequence\", self.stop_sequence))\n self.token_upper_limit = saved_json.get(\n \"token_upper_limit\", self.token_upper_limit\n )\n self.max_generation_token = saved_json.get(\n \"max_generation_token\", self.max_generation_token\n )\n self.presence_penalty = saved_json.get(\n \"presence_penalty\", self.presence_penalty\n )\n self.frequency_penalty = saved_json.get(\n 
\"frequency_penalty\", self.frequency_penalty\n )\n self.logit_bias = saved_json.get(\"logit_bias\", self.logit_bias)\n self.user_identifier = saved_json.get(\"user_identifier\", self.user_name)\n self.metadata = saved_json.get(\"metadata\", self.metadata)\n self.chatbot = saved_json[\"chatbot\"]\n return (\n os.path.basename(self.history_file_path)[:-5],\n saved_json[\"system\"],\n saved_json[\"chatbot\"],\n self.single_turn,\n self.temperature,\n self.top_p,\n self.n_choices,\n \",\".join(self.stop_sequence),\n self.token_upper_limit,\n self.max_generation_token,\n self.presence_penalty,\n self.frequency_penalty,\n self.logit_bias,\n self.user_identifier,\n )\n except:\n # 没有对话历史或者对话历史解析失败\n logger.info(f\"没有找到对话历史记录 {self.history_file_path}\")\n self.reset()\n return (\n os.path.basename(self.history_file_path),\n \"\",\n [],\n self.single_turn,\n self.temperature,\n self.top_p,\n self.n_choices,\n \",\".join(self.stop_sequence),\n self.token_upper_limit,\n self.max_generation_token,\n self.presence_penalty,\n self.frequency_penalty,\n self.logit_bias,\n self.user_identifier,\n )\n\n def delete_chat_history(self, filename):\n if filename == \"CANCELED\":\n return gr.update(), gr.update(), gr.update()\n if filename == \"\":\n return i18n(\"你没有选择任何对话历史\"), gr.update(), gr.update()\n if not filename.endswith(\".json\"):\n filename += \".json\"\n if filename == os.path.basename(filename):\n history_file_path = os.path.join(HISTORY_DIR, self.user_name, filename)\n else:\n history_file_path = filename\n md_history_file_path = history_file_path[:-5] + \".md\"\n try:\n os.remove(history_file_path)\n os.remove(md_history_file_path)\n return i18n(\"删除对话历史成功\"), get_history_list(self.user_name), []\n except:\n logger.info(f\"删除对话历史失败 {history_file_path}\")\n return (\n i18n(\"对话历史\") + filename + i18n(\"已经被删除啦\"),\n get_history_list(self.user_name),\n [],\n )\n\n def auto_load(self):\n filepath = get_history_filepath(self.user_name)\n if not filepath:\n self.history_file_path = new_auto_history_filename(self.user_name)\n else:\n self.history_file_path = filepath\n return self.load_chat_history()\n\n def like(self):\n \"\"\"like the last response, implement if needed\"\"\"\n return gr.update()\n\n def dislike(self):\n \"\"\"dislike the last response, implement if needed\"\"\"\n return gr.update()\n\n def deinitialize(self):\n \"\"\"deinitialize the model, implement if needed\"\"\"\n pass" }, { "identifier": "ModelType", "path": "src/base_model.py", "snippet": "class ModelType(Enum):\n Unknown = -1\n OpenAI = 0\n ChatGLM = 1\n OpenAIInstruct = 2\n OpenAIVision = 3\n Claude = 4\n Qwen = 5\n LLaMA = 6\n\n @classmethod\n def get_type(cls, model_name: str):\n model_name_lower = model_name.lower()\n if \"gpt\" in model_name_lower:\n if \"instruct\" in model_name_lower:\n model_type = ModelType.OpenAIInstruct\n elif \"vision\" in model_name_lower:\n model_type = ModelType.OpenAIVision\n else:\n model_type = ModelType.OpenAI\n elif \"chatglm\" in model_name_lower:\n model_type = ModelType.ChatGLM\n elif \"llama\" in model_name_lower or \"alpaca\" in model_name_lower or \"yi\" in model_name_lower:\n model_type = ModelType.LLaMA\n else:\n model_type = ModelType.Unknown\n return model_type" }, { "identifier": "ChatGLMClient", "path": "src/chatglm.py", "snippet": "class ChatGLMClient(BaseLLMModel):\n def __init__(self, model_name, user_name=\"\"):\n super().__init__(model_name=model_name, user=user_name)\n import torch\n from transformers import AutoModel, AutoTokenizer\n global CHATGLM_TOKENIZER, 
CHATGLM_MODEL\n self.deinitialize()\n if CHATGLM_TOKENIZER is None or CHATGLM_MODEL is None:\n system_name = platform.system()\n logger.info(f\"Loading model from {model_name}\")\n if model_name in LOCAL_MODELS:\n model_path = LOCAL_MODELS[model_name]\n else:\n model_path = model_name\n CHATGLM_TOKENIZER = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)\n quantified = False\n if \"int4\" in model_name:\n quantified = True\n model = AutoModel.from_pretrained(model_path, trust_remote_code=True, device_map='auto', torch_dtype='auto')\n if torch.cuda.is_available():\n logger.info(\"CUDA is available, using CUDA\")\n model = model.half().cuda()\n # mps加速还存在一些问题,暂时不使用\n elif system_name == \"Darwin\" and model_path is not None and not quantified:\n logger.info(\"Running on macOS, using MPS\")\n # running on macOS and model already downloaded\n model = model.half().to(\"mps\")\n else:\n logger.info(\"GPU is not available, using CPU\")\n model = model.float()\n model = model.eval()\n logger.info(f\"Model loaded from {model_path}\")\n CHATGLM_MODEL = model\n\n def _get_glm3_style_input(self):\n history = self.history\n query = history.pop()[\"content\"]\n return history, query\n\n def _get_glm2_style_input(self):\n history = [x[\"content\"] for x in self.history]\n query = history.pop()\n logger.debug(f\"{history}\")\n assert len(history) % 2 == 0, f\"History should be even length. current history is: {history}\"\n history = [[history[i], history[i + 1]]\n for i in range(0, len(history), 2)]\n return history, query\n\n def _get_glm_style_input(self):\n if \"glm2\" in self.model_name:\n return self._get_glm2_style_input()\n else:\n return self._get_glm3_style_input()\n\n def get_answer_at_once(self):\n history, query = self._get_glm_style_input()\n response, _ = CHATGLM_MODEL.chat(\n CHATGLM_TOKENIZER, query, history=history)\n return response, len(response)\n\n def get_answer_stream_iter(self):\n history, query = self._get_glm_style_input()\n for response, history in CHATGLM_MODEL.stream_chat(\n CHATGLM_TOKENIZER,\n query,\n history,\n max_length=self.token_upper_limit,\n top_p=self.top_p,\n temperature=self.temperature,\n ):\n yield response\n\n def deinitialize(self):\n import gc\n import torch\n # 释放显存\n global CHATGLM_MODEL, CHATGLM_TOKENIZER\n CHATGLM_MODEL = None\n CHATGLM_TOKENIZER = None\n gc.collect()\n torch.cuda.empty_cache()\n logger.info(\"ChatGLM model deinitialized\")" }, { "identifier": "LLaMAClient", "path": "src/llama.py", "snippet": "class LLaMAClient(BaseLLMModel):\n def __init__(self, model_name, user_name=\"\"):\n super().__init__(model_name=model_name, user=user_name)\n from transformers import AutoModelForCausalLM, AutoTokenizer\n self.max_generation_token = 1000\n logger.info(f\"Loading model from {model_name}\")\n if model_name in LOCAL_MODELS:\n model_path = LOCAL_MODELS[model_name]\n else:\n model_path = model_name\n self.tokenizer = AutoTokenizer.from_pretrained(model_path, legacy=True, use_fast=False)\n self.model = AutoModelForCausalLM.from_pretrained(model_path, device_map='auto', torch_dtype='auto').eval()\n logger.info(f\"Model loaded from {model_path}\")\n self.stop_str = self.tokenizer.eos_token or \"</s>\"\n\n def _get_chat_input(self):\n messages = []\n for conv in self.history:\n if conv[\"role\"] == \"system\":\n messages.append({'role': 'system', 'content': conv[\"content\"]})\n elif conv[\"role\"] == \"user\":\n messages.append({'role': 'user', 'content': conv[\"content\"]})\n else:\n messages.append({'role': 'assistant', 'content': 
conv[\"content\"]})\n input_ids = self.tokenizer.apply_chat_template(\n conversation=messages,\n tokenize=True,\n add_generation_prompt=True,\n return_tensors='pt'\n )\n\n return input_ids.to(self.model.device)\n\n def get_answer_at_once(self):\n input_ids = self._get_chat_input()\n output_ids = self.model.generate(\n input_ids,\n max_new_tokens=self.max_generation_token,\n top_p=self.top_p,\n temperature=self.temperature,\n )\n response = self.tokenizer.decode(output_ids[0][input_ids.shape[1]:], skip_special_tokens=True)\n\n return response, len(response)\n\n def get_answer_stream_iter(self):\n from transformers import TextIteratorStreamer\n from threading import Thread\n input_ids = self._get_chat_input()\n streamer = TextIteratorStreamer(\n self.tokenizer, timeout=60.0, skip_prompt=True, skip_special_tokens=True\n )\n thread = Thread(\n target=self.model.generate,\n kwargs={\"input_ids\": input_ids,\n \"max_new_tokens\": self.max_generation_token,\n \"top_p\": self.top_p,\n \"temperature\": self.temperature,\n \"streamer\": streamer}\n )\n thread.start()\n generated_text = \"\"\n for new_text in streamer:\n stop = False\n pos = new_text.find(self.stop_str)\n if pos != -1:\n new_text = new_text[:pos]\n stop = True\n generated_text += new_text\n yield generated_text\n if stop:\n break" }, { "identifier": "INITIAL_SYSTEM_PROMPT", "path": "src/presets.py", "snippet": "INITIAL_SYSTEM_PROMPT = \"You are a helpful assistant.\"" }, { "identifier": "TIMEOUT_ALL", "path": "src/presets.py", "snippet": "TIMEOUT_ALL = 200 # 非流式对话时的超时时间" }, { "identifier": "TIMEOUT_STREAMING", "path": "src/presets.py", "snippet": "TIMEOUT_STREAMING = 60 # 流式对话时的超时时间" }, { "identifier": "STANDARD_ERROR_MSG", "path": "src/presets.py", "snippet": "STANDARD_ERROR_MSG = i18n(\"☹️发生了错误:\") # 错误信息的标准前缀" }, { "identifier": "CONNECTION_TIMEOUT_MSG", "path": "src/presets.py", "snippet": "CONNECTION_TIMEOUT_MSG = i18n(\"连接超时,无法获取对话。\") # 连接超时" }, { "identifier": "READ_TIMEOUT_MSG", "path": "src/presets.py", "snippet": "READ_TIMEOUT_MSG = i18n(\"读取超时,无法获取对话。\") # 读取超时" }, { "identifier": "ERROR_RETRIEVE_MSG", "path": "src/presets.py", "snippet": "ERROR_RETRIEVE_MSG = i18n(\"请检查网络连接,或者API-Key是否有效。\")" }, { "identifier": "GENERAL_ERROR_MSG", "path": "src/presets.py", "snippet": "GENERAL_ERROR_MSG = i18n(\"获取对话时发生错误,请查看后台日志\")" }, { "identifier": "CHAT_COMPLETION_URL", "path": "src/presets.py", "snippet": "CHAT_COMPLETION_URL = \"https://api.openai.com/v1/chat/completions\"" }, { "identifier": "SUMMARY_CHAT_SYSTEM_PROMPT", "path": "src/presets.py", "snippet": "SUMMARY_CHAT_SYSTEM_PROMPT = \"\"\"\\\nPlease summarize the following conversation for a chat topic.\nNo more than 16 characters.\nNo special characters.\nPunctuation mark is banned.\nNot including '.' ':' '?' '!' 
'“' '*' '<' '>'.\nReply in user's language.\n\"\"\"" }, { "identifier": "hide_middle_chars", "path": "src/utils.py", "snippet": " class DataframeData(TypedDict):\nclass ConfigType(Enum):\nclass ConfigItem:\nclass SetupWizard:\ndef predict(current_model, *args):\ndef billing_info(current_model):\ndef set_key(current_model, *args):\ndef load_chat_history(current_model, *args):\ndef delete_chat_history(current_model, *args):\ndef interrupt(current_model, *args):\ndef reset(current_model, *args):\ndef retry(current_model, *args):\ndef delete_first_conversation(current_model, *args):\ndef delete_last_conversation(current_model, *args):\ndef set_system_prompt(current_model, *args):\ndef rename_chat_history(current_model, *args):\ndef auto_name_chat_history(current_model, *args):\ndef export_markdown(current_model, *args):\ndef upload_chat_history(current_model, *args):\ndef set_token_upper_limit(current_model, *args):\ndef set_temperature(current_model, *args):\ndef set_top_p(current_model, *args):\ndef set_n_choices(current_model, *args):\ndef set_stop_sequence(current_model, *args):\ndef set_max_tokens(current_model, *args):\ndef set_presence_penalty(current_model, *args):\ndef set_frequency_penalty(current_model, *args):\ndef set_logit_bias(current_model, *args):\ndef set_user_identifier(current_model, *args):\ndef set_single_turn(current_model, *args):\ndef handle_file_upload(current_model, *args):\ndef handle_summarize_index(current_model, *args):\ndef like(current_model, *args):\ndef dislike(current_model, *args):\ndef count_token(input_str):\ndef markdown_to_html_with_syntax_highlight(md_str): # deprecated\n def replacer(match):\ndef normalize_markdown(md_text: str) -> str: # deprecated\ndef convert_mdtext(md_text): # deprecated\ndef clip_rawtext(chat_message, need_escape=True):\ndef convert_bot_before_marked(chat_message):\ndef convert_user_before_marked(chat_message):\ndef escape_markdown(text):\ndef convert_asis(userinput): # deprecated\ndef detect_converted_mark(userinput): # deprecated\ndef detect_language(code): # deprecated\ndef construct_text(role, text):\ndef construct_user(text):\ndef construct_system(text):\ndef construct_assistant(text):\ndef save_file(filename, model, chatbot):\ndef sorted_by_pinyin(list):\ndef sorted_by_last_modified_time(list, dir):\ndef get_file_names_by_type(dir, filetypes=[\".json\"]):\ndef get_file_names_by_pinyin(dir, filetypes=[\".json\"]):\ndef get_file_names_dropdown_by_pinyin(dir, filetypes=[\".json\"]):\ndef get_file_names_by_last_modified_time(dir, filetypes=[\".json\"]):\ndef get_history_names(user_name=\"\"):\ndef get_first_history_name(user_name=\"\"):\ndef get_history_list(user_name=\"\"):\ndef init_history_list(user_name=\"\"):\ndef filter_history(user_name, keyword):\ndef load_template(filename, mode=0):\ndef get_template_names():\ndef get_template_dropdown():\ndef get_template_content(templates, selection, original_system_prompt):\ndef reset_textbox():\ndef reset_default():\ndef change_api_host(host):\ndef change_proxy(proxy):\ndef hide_middle_chars(s):\ndef submit_key(key):\ndef replace_today(prompt):\ndef get_geoip():\n def fetch_ip():\ndef find_n(lst, max_num):\ndef start_outputing():\ndef end_outputing():\ndef cancel_outputing():\ndef transfer_input(inputs):\ndef update_chuanhu():\ndef add_source_numbers(lst, source_name=\"Source\", use_source=True):\ndef add_details(lst):\ndef sheet_to_string(sheet, sheet_name=None):\ndef excel_to_string(file_path):\ndef get_last_day_of_month(any_day):\ndef get_model_source(model_name, 
alternative_source):\ndef refresh_ui_elements_on_load(current_model, selected_model_name, user_name):\ndef toggle_like_btn_visibility(selected_model_name):\ndef get_corresponding_file_type_by_model_name(selected_model_name):\ndef new_auto_history_filename(username):\ndef get_history_filepath(username):\ndef beautify_err_msg(err_msg):\ndef auth_from_conf(username, password):\ndef get_files_hash(file_src=None, file_paths=None):\ndef myprint(**args):\ndef replace_special_symbols(string, replace_string=\" \"):\n def __init__(self, key, name, default=None, type=ConfigType.String) -> None:\ndef generate_prompt_string(config_item):\ndef generate_result_string(config_item, config_value):\n def __init__(self, file_path=config_file) -> None:\n def set(self, config_items: List[ConfigItem], prompt: str):\n def set_users(self):\n def __setitem__(self, setting_key: str, value):\n def __getitem__(self, setting_key: str):\n def save(self):\ndef setup_wizard():\ndef save_pkl(data, file_path):\ndef load_pkl(file_path):\ndef chinese_preprocessing_func(text: str) -> List[str]:\nSERVER_GEO_IP_MSG = None\nFETCHING_IP = False\n SERVER_GEO_IP_MSG = i18n(\"你可以使用聊天功能。\")\n SERVER_GEO_IP_MSG = \"**您的IP区域:中国。**\"\n SERVER_GEO_IP_MSG = i18n(\"您的IP区域:\") + f\"{country}。\"\n FETCHING_IP = False\n FETCHING_IP = True" } ]
import base64 import datetime import json import os import colorama import gradio as gr import requests import traceback from io import BytesIO from PIL import Image from loguru import logger from src import shared, config from src.base_model import BaseLLMModel, ModelType from src.chatglm import ChatGLMClient from src.llama import LLaMAClient from src.presets import ( INITIAL_SYSTEM_PROMPT, TIMEOUT_ALL, TIMEOUT_STREAMING, STANDARD_ERROR_MSG, CONNECTION_TIMEOUT_MSG, READ_TIMEOUT_MSG, ERROR_RETRIEVE_MSG, GENERAL_ERROR_MSG, CHAT_COMPLETION_URL, SUMMARY_CHAT_SYSTEM_PROMPT ) from src.utils import ( hide_middle_chars, count_token, construct_system, construct_user, get_last_day_of_month, i18n, replace_special_symbols, )
12,948
self.headers = { "Content-Type": "application/json", "Authorization": f"Bearer {self.api_key}", } def _get_billing_data(self, billing_url): with config.retrieve_proxy(): response = requests.get( billing_url, headers=self.headers, timeout=TIMEOUT_ALL, ) if response.status_code == 200: data = response.json() return data else: raise Exception( f"API request failed with status code {response.status_code}: {response.text}" ) def _decode_chat_response(self, response): error_msg = "" for chunk in response.iter_lines(): if chunk: chunk = chunk.decode() chunk_length = len(chunk) try: chunk = json.loads(chunk[6:]) except: print(i18n("JSON解析错误,收到的内容: ") + f"{chunk}") error_msg += chunk continue try: if chunk_length > 6 and "delta" in chunk["choices"][0]: if "finish_details" in chunk["choices"][0]: finish_reason = chunk["choices"][0]["finish_details"] elif "finish_reason" in chunk["choices"][0]: finish_reason = chunk["choices"][0]["finish_reason"] else: finish_reason = chunk["finish_details"] if finish_reason == "stop": break try: yield chunk["choices"][0]["delta"]["content"] except Exception as e: # logger.error(f"Error: {e}") continue except: print(f"ERROR: {chunk}") continue if error_msg and not error_msg == "data: [DONE]": raise Exception(error_msg) def set_key(self, new_access_key): ret = super().set_key(new_access_key) self._refresh_header() return ret def _single_query_at_once(self, history, temperature=1.0): timeout = TIMEOUT_ALL headers = { "Content-Type": "application/json", "Authorization": f"Bearer {self.api_key}", "temperature": f"{temperature}", } payload = { "model": self.model_name, "messages": history, } # 如果有自定义的api-host,使用自定义host发送请求,否则使用默认设置发送请求 if shared.state.chat_completion_url != CHAT_COMPLETION_URL: logger.debug(f"使用自定义API URL: {shared.state.chat_completion_url}") with config.retrieve_proxy(): response = requests.post( shared.state.chat_completion_url, headers=headers, json=payload, stream=False, timeout=timeout, ) return response def get_model( model_name, lora_model_path=None, access_key=None, temperature=None, top_p=None, system_prompt=None, user_name="", original_model=None, ): msg = i18n("模型设置为了:") + f" {model_name}" model_type = ModelType.get_type(model_name) lora_choices = ["No LoRA"] if model_type != ModelType.OpenAI: config.local_embedding = True model = original_model chatbot = gr.Chatbot.update(label=model_name) try: if model_type == ModelType.OpenAI: logger.info(f"正在加载OpenAI模型: {model_name}") model = OpenAIClient( model_name=model_name, api_key=access_key, system_prompt=system_prompt, user_name=user_name, ) logger.info(f"OpenAI模型加载完成: {model_name}") elif model_type == ModelType.OpenAIVision: logger.info(f"正在加载OpenAI Vision模型: {model_name}") access_key = os.environ.get("OPENAI_API_KEY", access_key) model = OpenAIVisionClient( model_name, api_key=access_key, user_name=user_name) elif model_type == ModelType.ChatGLM: logger.info(f"正在加载ChatGLM模型: {model_name}")
# -*- coding: utf-8 -*- """ Get model client from model name """ class OpenAIClient(BaseLLMModel): def __init__( self, model_name, api_key, system_prompt=INITIAL_SYSTEM_PROMPT, temperature=1.0, top_p=1.0, user_name="", ) -> None: super().__init__( model_name=model_name, temperature=temperature, top_p=top_p, system_prompt=system_prompt, user=user_name, ) self.api_key = api_key self.need_api_key = True self._refresh_header() def get_answer_stream_iter(self): if not self.api_key: raise ValueError("API key is not set") response = self._get_response(stream=True) if response is not None: iter = self._decode_chat_response(response) partial_text = "" for i in iter: partial_text += i yield partial_text else: yield STANDARD_ERROR_MSG + GENERAL_ERROR_MSG def get_answer_at_once(self): if not self.api_key: raise ValueError("API key is not set") response = self._get_response() response = json.loads(response.text) content = response["choices"][0]["message"]["content"] total_token_count = response["usage"]["total_tokens"] return content, total_token_count def count_token(self, user_input): input_token_count = count_token(construct_user(user_input)) if self.system_prompt is not None and len(self.all_token_counts) == 0: system_prompt_token_count = count_token( construct_system(self.system_prompt) ) return input_token_count + system_prompt_token_count return input_token_count def billing_info(self): try: curr_time = datetime.datetime.now() last_day_of_month = get_last_day_of_month( curr_time).strftime("%Y-%m-%d") first_day_of_month = curr_time.replace(day=1).strftime("%Y-%m-%d") usage_url = f"{shared.state.usage_api_url}?start_date={first_day_of_month}&end_date={last_day_of_month}" try: usage_data = self._get_billing_data(usage_url) except Exception as e: logger.warning(f"获取API使用情况失败:" + str(e)) return i18n("**获取API使用情况失败**") rounded_usage = "{:.5f}".format(usage_data["total_usage"] / 100) return i18n("**本月使用金额** ") + f"\u3000 ${rounded_usage}" except requests.exceptions.ConnectTimeout: status_text = ( STANDARD_ERROR_MSG + CONNECTION_TIMEOUT_MSG + ERROR_RETRIEVE_MSG ) return status_text except requests.exceptions.ReadTimeout: status_text = STANDARD_ERROR_MSG + READ_TIMEOUT_MSG + ERROR_RETRIEVE_MSG return status_text except Exception as e: traceback.print_exc() logger.error(i18n("获取API使用情况失败:") + str(e)) return STANDARD_ERROR_MSG + ERROR_RETRIEVE_MSG def set_token_upper_limit(self, new_upper_limit): pass @shared.state.switching_api_key # 在不开启多账号模式的时候,这个装饰器不会起作用 def _get_response(self, stream=False): openai_api_key = self.api_key system_prompt = self.system_prompt history = self.history logger.debug(f"{history}") headers = { "Authorization": f"Bearer {openai_api_key}", "Content-Type": "application/json", } if system_prompt is not None: history = [construct_system(system_prompt), *history] payload = { "model": self.model_name, "messages": history, "temperature": self.temperature, "top_p": self.top_p, "n": self.n_choices, "stream": stream, "presence_penalty": self.presence_penalty, "frequency_penalty": self.frequency_penalty, } if self.max_generation_token is not None: payload["max_tokens"] = self.max_generation_token if self.stop_sequence is not None: payload["stop"] = self.stop_sequence if self.logit_bias is not None: payload["logit_bias"] = self.logit_bias if self.user_identifier is not None: payload["user"] = self.user_identifier if stream: timeout = TIMEOUT_STREAMING else: timeout = TIMEOUT_ALL # 如果有自定义的api-host,使用自定义host发送请求,否则使用默认设置发送请求 if shared.state.chat_completion_url != CHAT_COMPLETION_URL: 
logger.debug(f"使用自定义API URL: {shared.state.chat_completion_url}") with config.retrieve_proxy(): try: response = requests.post( shared.state.chat_completion_url, headers=headers, json=payload, stream=stream, timeout=timeout, ) except Exception as e: logger.error(f"Error: {e}") response = None return response def _refresh_header(self): self.headers = { "Authorization": f"Bearer {self.api_key}", "Content-Type": "application/json", } def _get_billing_data(self, billing_url): with config.retrieve_proxy(): response = requests.get( billing_url, headers=self.headers, timeout=TIMEOUT_ALL, ) if response.status_code == 200: data = response.json() return data else: raise Exception( f"API request failed with status code {response.status_code}: {response.text}" ) def _decode_chat_response(self, response): error_msg = "" for chunk in response.iter_lines(): if chunk: chunk = chunk.decode() chunk_length = len(chunk) try: chunk = json.loads(chunk[6:]) except Exception as e: print(i18n("JSON解析错误,收到的内容: ") + f"{chunk}") error_msg += chunk continue try: if chunk_length > 6 and "delta" in chunk["choices"][0]: if "finish_reason" in chunk["choices"][0]: finish_reason = chunk["choices"][0]["finish_reason"] else: finish_reason = chunk["finish_reason"] if finish_reason == "stop": break try: yield chunk["choices"][0]["delta"]["content"] except Exception as e: logger.error(f"Error: {e}") continue except: print(f"ERROR: {chunk}") continue if error_msg and not error_msg == "data: [DONE]": raise Exception(error_msg) def set_key(self, new_access_key): ret = super().set_key(new_access_key) self._refresh_header() return ret def _single_query_at_once(self, history, temperature=1.0): timeout = TIMEOUT_ALL headers = { "Content-Type": "application/json", "Authorization": f"Bearer {self.api_key}", "temperature": f"{temperature}", } payload = { "model": self.model_name, "messages": history, } # 如果有自定义的api-host,使用自定义host发送请求,否则使用默认设置发送请求 if shared.state.chat_completion_url != CHAT_COMPLETION_URL: logger.debug(f"使用自定义API URL: {shared.state.chat_completion_url}") with config.retrieve_proxy(): response = requests.post( shared.state.chat_completion_url, headers=headers, json=payload, stream=False, timeout=timeout, ) return response def auto_name_chat_history(self, name_chat_method, user_question, chatbot, single_turn_checkbox): if len(self.history) == 2 and not single_turn_checkbox and not config.hide_history_when_not_logged_in: user_question = self.history[0]["content"] if name_chat_method == i18n("模型自动总结(消耗tokens)"): ai_answer = self.history[1]["content"] try: history = [ {"role": "system", "content": SUMMARY_CHAT_SYSTEM_PROMPT}, {"role": "user", "content": f"Please write a title based on the following conversation:\n---\nUser: {user_question}\nAssistant: {ai_answer}"} ] response = self._single_query_at_once(history, temperature=0.0) response = json.loads(response.text) content = response["choices"][0]["message"]["content"] filename = replace_special_symbols(content) + ".json" except Exception as e: logger.info(f"自动命名失败。{e}") filename = replace_special_symbols(user_question)[:16] + ".json" return self.rename_chat_history(filename, chatbot) elif name_chat_method == i18n("第一条提问"): filename = replace_special_symbols(user_question)[:16] + ".json" return self.rename_chat_history(filename, chatbot) else: return gr.update() else: return gr.update() class OpenAIVisionClient(BaseLLMModel): def __init__( self, model_name, api_key, system_prompt=INITIAL_SYSTEM_PROMPT, temperature=1.0, top_p=1.0, user_name="" ) -> None: super().__init__( 
model_name=model_name, temperature=temperature, top_p=top_p, system_prompt=system_prompt, user=user_name ) self.api_key = api_key self.need_api_key = True self.max_generation_token = 4096 self.images = [] self._refresh_header() def get_answer_stream_iter(self): response = self._get_response(stream=True) if response is not None: iter = self._decode_chat_response(response) partial_text = "" for i in iter: partial_text += i yield partial_text else: yield STANDARD_ERROR_MSG + GENERAL_ERROR_MSG def get_answer_at_once(self): response = self._get_response() response = json.loads(response.text) content = response["choices"][0]["message"]["content"] total_token_count = response["usage"]["total_tokens"] return content, total_token_count def try_read_image(self, filepath): def is_image_file(filepath): # 判断文件是否为图片 valid_image_extensions = [ ".jpg", ".jpeg", ".png", ".bmp", ".gif", ".tiff"] file_extension = os.path.splitext(filepath)[1].lower() return file_extension in valid_image_extensions def image_to_base64(image_path): # 打开并加载图片 img = Image.open(image_path) # 获取图片的宽度和高度 width, height = img.size # 计算压缩比例,以确保最长边小于4096像素 max_dimension = 2048 scale_ratio = min(max_dimension / width, max_dimension / height) if scale_ratio < 1: # 按压缩比例调整图片大小 new_width = int(width * scale_ratio) new_height = int(height * scale_ratio) img = img.resize((new_width, new_height), Image.LANCZOS) # 将图片转换为jpg格式的二进制数据 buffer = BytesIO() if img.mode == "RGBA": img = img.convert("RGB") img.save(buffer, format='JPEG') binary_image = buffer.getvalue() # 对二进制数据进行Base64编码 base64_image = base64.b64encode(binary_image).decode('utf-8') return base64_image if is_image_file(filepath): logger.info(f"读取图片文件: {filepath}") base64_image = image_to_base64(filepath) self.images.append({ "path": filepath, "base64": base64_image, }) def handle_file_upload(self, files, chatbot, language): """if the model accepts multi modal input, implement this function""" if files: for file in files: if file.name: self.try_read_image(file.name) if self.images is not None: chatbot = chatbot + [([image["path"] for image in self.images], None)] return None, chatbot, None def prepare_inputs(self, real_inputs, use_websearch, files, reply_language, chatbot, load_from_cache_if_possible=True): fake_inputs = real_inputs display_append = "" limited_context = False return limited_context, fake_inputs, display_append, real_inputs, chatbot def count_token(self, user_input): input_token_count = count_token(construct_user(user_input)) if self.system_prompt is not None and len(self.all_token_counts) == 0: system_prompt_token_count = count_token( construct_system(self.system_prompt) ) return input_token_count + system_prompt_token_count return input_token_count def billing_info(self): try: curr_time = datetime.datetime.now() last_day_of_month = get_last_day_of_month( curr_time).strftime("%Y-%m-%d") first_day_of_month = curr_time.replace(day=1).strftime("%Y-%m-%d") usage_url = f"{shared.state.usage_api_url}?start_date={first_day_of_month}&end_date={last_day_of_month}" try: usage_data = self._get_billing_data(usage_url) except Exception as e: logger.warning(f"获取API使用情况失败:" + str(e)) return i18n("**获取API使用情况失败**") rounded_usage = "{:.5f}".format(usage_data["total_usage"] / 100) return i18n("**本月使用金额** ") + f"\u3000 ${rounded_usage}" except requests.exceptions.ConnectTimeout: status_text = ( STANDARD_ERROR_MSG + CONNECTION_TIMEOUT_MSG + ERROR_RETRIEVE_MSG ) return status_text except requests.exceptions.ReadTimeout: status_text = STANDARD_ERROR_MSG + READ_TIMEOUT_MSG + 
ERROR_RETRIEVE_MSG return status_text except Exception as e: traceback.print_exc() logger.error(i18n("获取API使用情况失败:") + str(e)) return STANDARD_ERROR_MSG + ERROR_RETRIEVE_MSG @shared.state.switching_api_key # 在不开启多账号模式的时候,这个装饰器不会起作用 def _get_response(self, stream=False): openai_api_key = self.api_key system_prompt = self.system_prompt history = self.history if self.images: self.history[-1]["content"] = [ {"type": "text", "text": self.history[-1]["content"]}, *[{"type": "image_url", "image_url": "data:image/jpeg;base64," + image["base64"]} for image in self.images] ] self.images = [] logger.debug(colorama.Fore.YELLOW + f"{history}" + colorama.Fore.RESET) headers = { "Content-Type": "application/json", "Authorization": f"Bearer {openai_api_key}", } if system_prompt is not None: history = [construct_system(system_prompt), *history] payload = { "model": self.model_name, "messages": history, "temperature": self.temperature, "top_p": self.top_p, "n": self.n_choices, "stream": stream, "presence_penalty": self.presence_penalty, "frequency_penalty": self.frequency_penalty, "max_tokens": 4096 } if self.stop_sequence is not None: payload["stop"] = self.stop_sequence if self.logit_bias is not None: payload["logit_bias"] = self.encoded_logit_bias() if self.user_identifier: payload["user"] = self.user_identifier if stream: timeout = TIMEOUT_STREAMING else: timeout = TIMEOUT_ALL # 如果有自定义的api-host,使用自定义host发送请求,否则使用默认设置发送请求 if shared.state.chat_completion_url != CHAT_COMPLETION_URL: logger.debug(f"使用自定义API URL: {shared.state.chat_completion_url}") with config.retrieve_proxy(): try: response = requests.post( shared.state.chat_completion_url, headers=headers, json=payload, stream=stream, timeout=timeout, ) except: return None return response def _refresh_header(self): self.headers = { "Content-Type": "application/json", "Authorization": f"Bearer {self.api_key}", } def _get_billing_data(self, billing_url): with config.retrieve_proxy(): response = requests.get( billing_url, headers=self.headers, timeout=TIMEOUT_ALL, ) if response.status_code == 200: data = response.json() return data else: raise Exception( f"API request failed with status code {response.status_code}: {response.text}" ) def _decode_chat_response(self, response): error_msg = "" for chunk in response.iter_lines(): if chunk: chunk = chunk.decode() chunk_length = len(chunk) try: chunk = json.loads(chunk[6:]) except: print(i18n("JSON解析错误,收到的内容: ") + f"{chunk}") error_msg += chunk continue try: if chunk_length > 6 and "delta" in chunk["choices"][0]: if "finish_details" in chunk["choices"][0]: finish_reason = chunk["choices"][0]["finish_details"] elif "finish_reason" in chunk["choices"][0]: finish_reason = chunk["choices"][0]["finish_reason"] else: finish_reason = chunk["finish_details"] if finish_reason == "stop": break try: yield chunk["choices"][0]["delta"]["content"] except Exception as e: # logger.error(f"Error: {e}") continue except: print(f"ERROR: {chunk}") continue if error_msg and not error_msg == "data: [DONE]": raise Exception(error_msg) def set_key(self, new_access_key): ret = super().set_key(new_access_key) self._refresh_header() return ret def _single_query_at_once(self, history, temperature=1.0): timeout = TIMEOUT_ALL headers = { "Content-Type": "application/json", "Authorization": f"Bearer {self.api_key}", "temperature": f"{temperature}", } payload = { "model": self.model_name, "messages": history, } # 如果有自定义的api-host,使用自定义host发送请求,否则使用默认设置发送请求 if shared.state.chat_completion_url != CHAT_COMPLETION_URL: logger.debug(f"使用自定义API URL: 
{shared.state.chat_completion_url}") with config.retrieve_proxy(): response = requests.post( shared.state.chat_completion_url, headers=headers, json=payload, stream=False, timeout=timeout, ) return response def get_model( model_name, lora_model_path=None, access_key=None, temperature=None, top_p=None, system_prompt=None, user_name="", original_model=None, ): msg = i18n("模型设置为了:") + f" {model_name}" model_type = ModelType.get_type(model_name) lora_choices = ["No LoRA"] if model_type != ModelType.OpenAI: config.local_embedding = True model = original_model chatbot = gr.Chatbot.update(label=model_name) try: if model_type == ModelType.OpenAI: logger.info(f"正在加载OpenAI模型: {model_name}") model = OpenAIClient( model_name=model_name, api_key=access_key, system_prompt=system_prompt, user_name=user_name, ) logger.info(f"OpenAI模型加载完成: {model_name}") elif model_type == ModelType.OpenAIVision: logger.info(f"正在加载OpenAI Vision模型: {model_name}") access_key = os.environ.get("OPENAI_API_KEY", access_key) model = OpenAIVisionClient( model_name, api_key=access_key, user_name=user_name) elif model_type == ModelType.ChatGLM: logger.info(f"正在加载ChatGLM模型: {model_name}")
model = ChatGLMClient(model_name, user_name=user_name)
4
2023-12-27 12:14:26+00:00
16k
camenduru/AnyDoor-online-hf
ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('font/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "default", "path": "ldm/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "ismap", "path": "ldm/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "isimage", "path": "ldm/util.py", "snippet": "def isimage(x):\n if not isinstance(x,torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "mean_flat", "path": "ldm/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" }, { "identifier": "count_params", "path": "ldm/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "LitEma", "path": "ldm/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int) if use_num_upates\n else torch.tensor(-1, dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n # remove as '.'-character is not allowed in buffers\n s_name = name.replace('.', '')\n self.m_name2s_name.update({name: s_name})\n self.register_buffer(s_name, p.clone().detach().data)\n\n self.collected_params = []\n\n def reset_num_updates(self):\n del self.num_updates\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int))\n\n def forward(self, model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 
1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "normal_kl", "path": "ldm/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. 
Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "DiagonalGaussianDistribution", "path": "ldm/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean" }, { "identifier": "IdentityFirstStage", "path": "ldm/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "AutoencoderKL", "path": "ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ema_decay=None,\n learn_logvar=False\n ):\n super().__init__()\n self.learn_logvar = learn_logvar\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2*ddconfig[\"z_channels\"], 2*embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels)==int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n\n self.use_ema = ema_decay is not None\n if self.use_ema:\n self.ema_decay = ema_decay\n assert 0. 
< ema_decay < 1.\n self.model_ema = LitEma(self, decay=ema_decay)\n print(f\"Keeping EMAs of {len(list(self.model_ema.buffers()))}.\")\n\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n @contextmanager\n def ema_scope(self, context=None):\n if self.use_ema:\n self.model_ema.store(self.parameters())\n self.model_ema.copy_to(self)\n if context is not None:\n print(f\"{context}: Switched to EMA weights\")\n try:\n yield None\n finally:\n if self.use_ema:\n self.model_ema.restore(self.parameters())\n if context is not None:\n print(f\"{context}: Restored training weights\")\n\n def on_train_batch_end(self, *args, **kwargs):\n if self.use_ema:\n self.model_ema(self)\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n self.log(\"aeloss\", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n\n self.log(\"discloss\", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return discloss\n\n def validation_step(self, batch, batch_idx):\n log_dict = self._validation_step(batch, batch_idx)\n with self.ema_scope():\n log_dict_ema = self._validation_step(batch, batch_idx, postfix=\"_ema\")\n return log_dict\n\n def _validation_step(self, batch, batch_idx, postfix=\"\"):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n self.log(f\"val{postfix}/rec_loss\", log_dict_ae[f\"val{postfix}/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def 
configure_optimizers(self):\n lr = self.learning_rate\n ae_params_list = list(self.encoder.parameters()) + list(self.decoder.parameters()) + list(\n self.quant_conv.parameters()) + list(self.post_quant_conv.parameters())\n if self.learn_logvar:\n print(f\"{self.__class__.__name__}: Learning logvar\")\n ae_params_list.append(self.loss.logvar)\n opt_ae = torch.optim.Adam(ae_params_list,\n lr=lr, betas=(0.5, 0.9))\n opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),\n lr=lr, betas=(0.5, 0.9))\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, log_ema=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n if log_ema or self.use_ema:\n with self.ema_scope():\n xrec_ema, posterior_ema = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec_ema.shape[1] > 3\n xrec_ema = self.to_rgb(xrec_ema)\n log[\"samples_ema\"] = self.decode(torch.randn_like(posterior_ema.sample()))\n log[\"reconstructions_ema\"] = xrec_ema\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.*(x-x.min())/(x.max()-x.min()) - 1.\n return x" }, { "identifier": "make_beta_schedule", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "extract_into_tensor", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "noise_like", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DDIMSampler", "path": "ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n 
self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n ucg_schedule=None,\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list): ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n elif isinstance(conditioning, list):\n for ctmp in conditioning:\n if ctmp.shape[0] != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n ucg_schedule=ucg_schedule\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,\n ucg_schedule=None):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n if ucg_schedule is not None:\n assert len(ucg_schedule) == len(time_range)\n unconditional_guidance_scale = ucg_schedule[i]\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n dynamic_threshold=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n model_output = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [torch.cat([\n unconditional_conditioning[k][i],\n c[k][i]]) for i in range(len(c[k]))]\n else:\n c_in[k] = torch.cat([\n unconditional_conditioning[k],\n c[k]])\n elif isinstance(c, list):\n c_in = list()\n assert isinstance(unconditional_conditioning, list)\n for i in range(len(c)):\n c_in.append(torch.cat([unconditional_conditioning[i], c[i]]))\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)\n\n if self.model.parameterization == \"v\":\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\", 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n if self.model.parameterization != \"v\":\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n else:\n pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n raise 
NotImplementedError()\n\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,\n unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None):\n num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc='Encoding Image'):\n t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)\n if unconditional_guidance_scale == 1.:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c))), 2)\n noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = alphas_next[i].sqrt() * (\n (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred\n x_next = xt_weighted + weighted_noise_pred\n if return_intermediates and i % (\n num_steps // return_intermediates) == 0 and i < num_steps - 1:\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n if callback: callback(i)\n\n out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}\n if return_intermediates:\n out.update({'intermediates': intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False, callback=None):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, 
device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n if callback: callback(i)\n return x_dec" } ]
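The DDIMSampler snippet above centres on the update inside p_sample_ddim: predict x_0 from the current sample and the predicted noise, then step to the previous DDIM index. A minimal, self-contained sketch of that single step (assuming alpha_t, alpha_prev and sigma_t are the per-index schedule values already selected, and ignoring temperature, noise dropout and dynamic thresholding) could look like:

import torch

def ddim_step(x_t, e_t, alpha_t, alpha_prev, sigma_t):
    # alpha_t / alpha_prev are the cumulative products (alpha-bar) at the current
    # and previous DDIM indices; sigma_t == 0 gives the deterministic eta = 0 sampler.
    alpha_t, alpha_prev, sigma_t = (torch.as_tensor(v, dtype=x_t.dtype)
                                    for v in (alpha_t, alpha_prev, sigma_t))
    pred_x0 = (x_t - (1. - alpha_t).sqrt() * e_t) / alpha_t.sqrt()   # current estimate of x_0
    dir_xt = (1. - alpha_prev - sigma_t ** 2).sqrt() * e_t           # direction pointing to x_t
    noise = sigma_t * torch.randn_like(x_t)                          # vanishes when sigma_t == 0
    return alpha_prev.sqrt() * pred_x0 + dir_xt + noise

Iterating this step over np.flip(ddim_timesteps), asking the model for e_t at each index, is what the loop in ddim_sampling above does.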
import torch import torch.nn as nn import numpy as np import pytorch_lightning as pl import itertools import torch.nn.functional as F from torch.optim.lr_scheduler import LambdaLR from einops import rearrange, repeat from contextlib import contextmanager, nullcontext from functools import partial from tqdm import tqdm from torchvision.utils import make_grid from pytorch_lightning.utilities.distributed import rank_zero_only from omegaconf import ListConfig from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config from ldm.modules.ema import LitEma from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like from ldm.models.diffusion.ddim import DDIMSampler
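Most of these imports feed the schedule set-up shown in all_code below. As a rough illustration of what make_beta_schedule's "linear" branch produces, using the same default values that appear in DDPM.__init__ (timesteps=1000, linear_start=1e-4, linear_end=2e-2), purely for demonstration:

import numpy as np
import torch

# "linear" schedule: a squared linspace between the square roots of the endpoints
n_timestep, linear_start, linear_end = 1000, 1e-4, 2e-2
betas = (torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2).numpy()
alphas = 1. - betas
alphas_cumprod = np.cumprod(alphas, axis=0)               # alpha-bar_t, drives q(x_t | x_0)
alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])  # shifted copy used for the posterior
print(round(float(alphas_cumprod[0]), 4), float(alphas_cumprod[-1]) < 1e-3)   # ~0.9999 True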
11835
self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. * 1 - torch.Tensor(alphas_cumprod)) elif self.parameterization == "v": lvlb_weights = torch.ones_like(self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))) else: raise NotImplementedError("mu not supported") lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] if self.make_it_fit: n_params = len([name for name, _ in itertools.chain(self.named_parameters(), self.named_buffers())]) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): 
new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys:\n {missing}") if len(unexpected) > 0: print(f"\nUnexpected Keys:\n {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. """
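The schedule buffers registered above are all 1-D tensors indexed by timestep; extract_into_tensor (see the context snippet for this record) is what turns them into per-sample scalars that broadcast over image-shaped tensors. A tiny standalone check of that behaviour:

import torch

a = torch.linspace(0.9999, 0.0001, steps=1000)   # stand-in for a schedule buffer such as alphas_cumprod
t = torch.tensor([0, 499, 999])                  # one timestep index per batch element
b = t.shape[0]
x_shape = (b, 4, 64, 64)                         # shape of the latent the coefficient will scale
out = a.gather(-1, t).reshape(b, *((1,) * (len(x_shape) - 1)))   # what extract_into_tensor returns
print(out.shape)                                 # torch.Size([3, 1, 1, 1])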
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., make_it_fit=False, ucg_training=None, reset_ema=False, reset_num_ema_updates=False, ): super().__init__() assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if reset_ema: assert exists(ckpt_path) if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) if reset_ema: assert self.use_ema print(f"Resetting ema to pure model weights. 
This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) else: self.register_buffer('logvar', logvar) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) elif self.parameterization == "v": lvlb_weights = torch.ones_like(self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))) else: raise NotImplementedError("mu not supported") lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] if self.make_it_fit: n_params = len([name for name, _ in itertools.chain(self.named_parameters(), self.named_buffers())]) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys:\n {missing}") if len(unexpected) > 0: print(f"\nUnexpected Keys:\n {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. """
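The sqrt_alphas_cumprod / sqrt_one_minus_alphas_cumprod buffers registered above exist to evaluate the closed-form forward process x_t = sqrt(alpha-bar_t) * x_0 + sqrt(1 - alpha-bar_t) * eps; the DDIMSampler's stochastic_encode in this record's context computes exactly this with extract_into_tensor. A sketch of the same computation written against those 1-D buffers (the DDPM class' own q_sample is not visible in this excerpt):

import torch

def q_sample_sketch(x0, t, sqrt_alphas_cumprod, sqrt_one_minus_alphas_cumprod, noise=None):
    # x_t = sqrt(alpha-bar_t) * x_0 + sqrt(1 - alpha-bar_t) * eps, broadcast per batch element
    if noise is None:
        noise = torch.randn_like(x0)
    shape = (t.shape[0],) + (1,) * (x0.dim() - 1)
    coef_x0 = sqrt_alphas_cumprod.gather(-1, t).reshape(shape)
    coef_eps = sqrt_one_minus_alphas_cumprod.gather(-1, t).reshape(shape)
    return coef_x0 * x0 + coef_eps * noise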
mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start)
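The gold next line above gives only the mean of q(x_t | x_0); the variance and log-variance promised by the docstring would, in a standard DDPM implementation, come from the matching 1 - alphas_cumprod and log_one_minus_alphas_cumprod buffers registered earlier. A hedged guess at the remaining body, not confirmed by this record:

variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape)
return mean, variance, log_variance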
14
2023-12-25 04:48:34+00:00
16k
AContesini/Convert_PDF_to_DOCX_or_vice-versa
venv/Lib/site-packages/tqdm/auto.py
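The tqdm class excerpted in the context below documents its constructor parameters at length. For orientation, a minimal usage sketch of tqdm.auto, which picks a notebook-friendly bar when one is available and falls back to the console bar otherwise, using only parameters described in the docstring that follows (desc, unit, ncols, mininterval; the values are illustrative):

import time
from tqdm.auto import tqdm

for _ in tqdm(range(50), desc="converting", unit="page", ncols=80, mininterval=0.1):
    time.sleep(0.02)   # stand-in for real work, e.g. converting one page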
[ { "identifier": "TqdmExperimentalWarning", "path": "venv/Lib/site-packages/tqdm/std.py", "snippet": "class TqdmExperimentalWarning(TqdmWarning, FutureWarning):\n \"\"\"beta feature, unstable API and behaviour\"\"\"\n pass" }, { "identifier": "tqdm", "path": "venv/Lib/site-packages/tqdm/asyncio.py", "snippet": "class tqdm_asyncio(std_tqdm):\n def __init__(self, iterable=None, *args, **kwargs):\n def __aiter__(self):\n async def __anext__(self):\n def send(self, *args, **kwargs):\n def as_completed(cls, fs, *, loop=None, timeout=None, total=None, **tqdm_kwargs):\n async def gather(cls, *fs, loop=None, timeout=None, total=None, **tqdm_kwargs):\n async def wrap_awaitable(i, f):\ndef tarange(*args, **kwargs):" }, { "identifier": "tqdm", "path": "venv/Lib/site-packages/tqdm/std.py", "snippet": "class tqdm(Comparable):\n \"\"\"\n Decorate an iterable object, returning an iterator which acts exactly\n like the original iterable, but prints a dynamically updating\n progressbar every time a value is requested.\n\n Parameters\n ----------\n iterable : iterable, optional\n Iterable to decorate with a progressbar.\n Leave blank to manually manage the updates.\n desc : str, optional\n Prefix for the progressbar.\n total : int or float, optional\n The number of expected iterations. If unspecified,\n len(iterable) is used if possible. If float(\"inf\") or as a last\n resort, only basic progress statistics are displayed\n (no ETA, no progressbar).\n If `gui` is True and this parameter needs subsequent updating,\n specify an initial arbitrary large positive number,\n e.g. 9e9.\n leave : bool, optional\n If [default: True], keeps all traces of the progressbar\n upon termination of iteration.\n If `None`, will leave only if `position` is `0`.\n file : `io.TextIOWrapper` or `io.StringIO`, optional\n Specifies where to output the progress messages\n (default: sys.stderr). Uses `file.write(str)` and `file.flush()`\n methods. For encoding, see `write_bytes`.\n ncols : int, optional\n The width of the entire output message. If specified,\n dynamically resizes the progressbar to stay within this bound.\n If unspecified, attempts to use environment width. The\n fallback is a meter width of 10 and no limit for the counter and\n statistics. If 0, will not print any meter (only stats).\n mininterval : float, optional\n Minimum progress display update interval [default: 0.1] seconds.\n maxinterval : float, optional\n Maximum progress display update interval [default: 10] seconds.\n Automatically adjusts `miniters` to correspond to `mininterval`\n after long display update lag. Only works if `dynamic_miniters`\n or monitor thread is enabled.\n miniters : int or float, optional\n Minimum progress display update interval, in iterations.\n If 0 and `dynamic_miniters`, will automatically adjust to equal\n `mininterval` (more CPU efficient, good for tight loops).\n If > 0, will skip display of specified number of iterations.\n Tweak this and `mininterval` to get very efficient loops.\n If your progress is erratic with both fast and slow iterations\n (network, skipping items, etc) you should set miniters=1.\n ascii : bool or str, optional\n If unspecified or False, use unicode (smooth blocks) to fill\n the meter. The fallback is to use ASCII characters \" 123456789#\".\n disable : bool, optional\n Whether to disable the entire progressbar wrapper\n [default: False]. 
If set to None, disable on non-TTY.\n unit : str, optional\n String that will be used to define the unit of each iteration\n [default: it].\n unit_scale : bool or int or float, optional\n If 1 or True, the number of iterations will be reduced/scaled\n automatically and a metric prefix following the\n International System of Units standard will be added\n (kilo, mega, etc.) [default: False]. If any other non-zero\n number, will scale `total` and `n`.\n dynamic_ncols : bool, optional\n If set, constantly alters `ncols` and `nrows` to the\n environment (allowing for window resizes) [default: False].\n smoothing : float, optional\n Exponential moving average smoothing factor for speed estimates\n (ignored in GUI mode). Ranges from 0 (average speed) to 1\n (current/instantaneous speed) [default: 0.3].\n bar_format : str, optional\n Specify a custom bar string formatting. May impact performance.\n [default: '{l_bar}{bar}{r_bar}'], where\n l_bar='{desc}: {percentage:3.0f}%|' and\n r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '\n '{rate_fmt}{postfix}]'\n Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,\n percentage, elapsed, elapsed_s, ncols, nrows, desc, unit,\n rate, rate_fmt, rate_noinv, rate_noinv_fmt,\n rate_inv, rate_inv_fmt, postfix, unit_divisor,\n remaining, remaining_s, eta.\n Note that a trailing \": \" is automatically removed after {desc}\n if the latter is empty.\n initial : int or float, optional\n The initial counter value. Useful when restarting a progress\n bar [default: 0]. If using float, consider specifying `{n:.3f}`\n or similar in `bar_format`, or specifying `unit_scale`.\n position : int, optional\n Specify the line offset to print this bar (starting from 0)\n Automatic if unspecified.\n Useful to manage multiple bars at once (eg, from threads).\n postfix : dict or *, optional\n Specify additional stats to display at the end of the bar.\n Calls `set_postfix(**postfix)` if possible (dict).\n unit_divisor : float, optional\n [default: 1000], ignored unless `unit_scale` is True.\n write_bytes : bool, optional\n Whether to write bytes. If (default: False) will write unicode.\n lock_args : tuple, optional\n Passed to `refresh` for intermediate output\n (initialisation, iterating, and updating).\n nrows : int, optional\n The screen height. If specified, hides nested bars outside this\n bound. If unspecified, attempts to use environment height.\n The fallback is 20.\n colour : str, optional\n Bar colour (e.g. 'green', '#00ff00').\n delay : float, optional\n Don't display until [default: 0] seconds have elapsed.\n gui : bool, optional\n WARNING: internal parameter - do not use.\n Use tqdm.gui.tqdm(...) instead. 
If set, will attempt to use\n matplotlib animations for a graphical output [default: False].\n\n Returns\n -------\n out : decorated iterator.\n \"\"\"\n\n monitor_interval = 10 # set to 0 to disable the thread\n monitor = None\n _instances = WeakSet()\n\n @staticmethod\n def format_sizeof(num, suffix='', divisor=1000):\n \"\"\"\n Formats a number (greater than unity) with SI Order of Magnitude\n prefixes.\n\n Parameters\n ----------\n num : float\n Number ( >= 1) to format.\n suffix : str, optional\n Post-postfix [default: ''].\n divisor : float, optional\n Divisor between prefixes [default: 1000].\n\n Returns\n -------\n out : str\n Number with Order of Magnitude SI unit postfix.\n \"\"\"\n for unit in ['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z']:\n if abs(num) < 999.5:\n if abs(num) < 99.95:\n if abs(num) < 9.995:\n return '{0:1.2f}'.format(num) + unit + suffix\n return '{0:2.1f}'.format(num) + unit + suffix\n return '{0:3.0f}'.format(num) + unit + suffix\n num /= divisor\n return '{0:3.1f}Y'.format(num) + suffix\n\n @staticmethod\n def format_interval(t):\n \"\"\"\n Formats a number of seconds as a clock time, [H:]MM:SS\n\n Parameters\n ----------\n t : int\n Number of seconds.\n\n Returns\n -------\n out : str\n [H:]MM:SS\n \"\"\"\n mins, s = divmod(int(t), 60)\n h, m = divmod(mins, 60)\n if h:\n return '{0:d}:{1:02d}:{2:02d}'.format(h, m, s)\n else:\n return '{0:02d}:{1:02d}'.format(m, s)\n\n @staticmethod\n def format_num(n):\n \"\"\"\n Intelligent scientific notation (.3g).\n\n Parameters\n ----------\n n : int or float or Numeric\n A Number.\n\n Returns\n -------\n out : str\n Formatted number.\n \"\"\"\n f = '{0:.3g}'.format(n).replace('+0', '+').replace('-0', '-')\n n = str(n)\n return f if len(f) < len(n) else n\n\n @staticmethod\n def status_printer(file):\n \"\"\"\n Manage the printing and in-place updating of a line of characters.\n Note that if the string is longer than a line, then in-place\n updating may not work (it will print a new line at each refresh).\n \"\"\"\n fp = file\n fp_flush = getattr(fp, 'flush', lambda: None) # pragma: no cover\n if fp in (sys.stderr, sys.stdout):\n getattr(sys.stderr, 'flush', lambda: None)()\n getattr(sys.stdout, 'flush', lambda: None)()\n\n def fp_write(s):\n fp.write(str(s))\n fp_flush()\n\n last_len = [0]\n\n def print_status(s):\n len_s = disp_len(s)\n fp_write('\\r' + s + (' ' * max(last_len[0] - len_s, 0)))\n last_len[0] = len_s\n\n return print_status\n\n @staticmethod\n def format_meter(n, total, elapsed, ncols=None, prefix='', ascii=False, unit='it',\n unit_scale=False, rate=None, bar_format=None, postfix=None,\n unit_divisor=1000, initial=0, colour=None, **extra_kwargs):\n \"\"\"\n Return a string-based progress bar given some parameters\n\n Parameters\n ----------\n n : int or float\n Number of finished iterations.\n total : int or float\n The expected total number of iterations. If meaningless (None),\n only basic progress statistics are displayed (no ETA).\n elapsed : float\n Number of seconds passed since start.\n ncols : int, optional\n The width of the entire output message. If specified,\n dynamically resizes `{bar}` to stay within this bound\n [default: None]. If `0`, will not print any bar (only stats).\n The fallback is `{bar:10}`.\n prefix : str, optional\n Prefix message (included in total width) [default: ''].\n Use as {desc} in bar_format string.\n ascii : bool, optional or str, optional\n If not set, use unicode (smooth blocks) to fill the meter\n [default: False]. 
The fallback is to use ASCII characters\n \" 123456789#\".\n unit : str, optional\n The iteration unit [default: 'it'].\n unit_scale : bool or int or float, optional\n If 1 or True, the number of iterations will be printed with an\n appropriate SI metric prefix (k = 10^3, M = 10^6, etc.)\n [default: False]. If any other non-zero number, will scale\n `total` and `n`.\n rate : float, optional\n Manual override for iteration rate.\n If [default: None], uses n/elapsed.\n bar_format : str, optional\n Specify a custom bar string formatting. May impact performance.\n [default: '{l_bar}{bar}{r_bar}'], where\n l_bar='{desc}: {percentage:3.0f}%|' and\n r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '\n '{rate_fmt}{postfix}]'\n Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,\n percentage, elapsed, elapsed_s, ncols, nrows, desc, unit,\n rate, rate_fmt, rate_noinv, rate_noinv_fmt,\n rate_inv, rate_inv_fmt, postfix, unit_divisor,\n remaining, remaining_s, eta.\n Note that a trailing \": \" is automatically removed after {desc}\n if the latter is empty.\n postfix : *, optional\n Similar to `prefix`, but placed at the end\n (e.g. for additional stats).\n Note: postfix is usually a string (not a dict) for this method,\n and will if possible be set to postfix = ', ' + postfix.\n However other types are supported (#382).\n unit_divisor : float, optional\n [default: 1000], ignored unless `unit_scale` is True.\n initial : int or float, optional\n The initial counter value [default: 0].\n colour : str, optional\n Bar colour (e.g. 'green', '#00ff00').\n\n Returns\n -------\n out : Formatted meter and stats, ready to display.\n \"\"\"\n\n # sanity check: total\n if total and n >= (total + 0.5): # allow float imprecision (#849)\n total = None\n\n # apply custom scale if necessary\n if unit_scale and unit_scale not in (True, 1):\n if total:\n total *= unit_scale\n n *= unit_scale\n if rate:\n rate *= unit_scale # by default rate = self.avg_dn / self.avg_dt\n unit_scale = False\n\n elapsed_str = tqdm.format_interval(elapsed)\n\n # if unspecified, attempt to use rate = average speed\n # (we allow manual override since predicting time is an arcane art)\n if rate is None and elapsed:\n rate = (n - initial) / elapsed\n inv_rate = 1 / rate if rate else None\n format_sizeof = tqdm.format_sizeof\n rate_noinv_fmt = ((format_sizeof(rate) if unit_scale else\n '{0:5.2f}'.format(rate)) if rate else '?') + unit + '/s'\n rate_inv_fmt = (\n (format_sizeof(inv_rate) if unit_scale else '{0:5.2f}'.format(inv_rate))\n if inv_rate else '?') + 's/' + unit\n rate_fmt = rate_inv_fmt if inv_rate and inv_rate > 1 else rate_noinv_fmt\n\n if unit_scale:\n n_fmt = format_sizeof(n, divisor=unit_divisor)\n total_fmt = format_sizeof(total, divisor=unit_divisor) if total is not None else '?'\n else:\n n_fmt = str(n)\n total_fmt = str(total) if total is not None else '?'\n\n try:\n postfix = ', ' + postfix if postfix else ''\n except TypeError:\n pass\n\n remaining = (total - n) / rate if rate and total else 0\n remaining_str = tqdm.format_interval(remaining) if rate else '?'\n try:\n eta_dt = (datetime.now() + timedelta(seconds=remaining)\n if rate and total else datetime.utcfromtimestamp(0))\n except OverflowError:\n eta_dt = datetime.max\n\n # format the stats displayed to the left and right sides of the bar\n if prefix:\n # old prefix setup work around\n bool_prefix_colon_already = (prefix[-2:] == \": \")\n l_bar = prefix if bool_prefix_colon_already else prefix + \": \"\n else:\n l_bar = ''\n\n r_bar = f'| 
{n_fmt}/{total_fmt} [{elapsed_str}<{remaining_str}, {rate_fmt}{postfix}]'\n\n # Custom bar formatting\n # Populate a dict with all available progress indicators\n format_dict = {\n # slight extension of self.format_dict\n 'n': n, 'n_fmt': n_fmt, 'total': total, 'total_fmt': total_fmt,\n 'elapsed': elapsed_str, 'elapsed_s': elapsed,\n 'ncols': ncols, 'desc': prefix or '', 'unit': unit,\n 'rate': inv_rate if inv_rate and inv_rate > 1 else rate,\n 'rate_fmt': rate_fmt, 'rate_noinv': rate,\n 'rate_noinv_fmt': rate_noinv_fmt, 'rate_inv': inv_rate,\n 'rate_inv_fmt': rate_inv_fmt,\n 'postfix': postfix, 'unit_divisor': unit_divisor,\n 'colour': colour,\n # plus more useful definitions\n 'remaining': remaining_str, 'remaining_s': remaining,\n 'l_bar': l_bar, 'r_bar': r_bar, 'eta': eta_dt,\n **extra_kwargs}\n\n # total is known: we can predict some stats\n if total:\n # fractional and percentage progress\n frac = n / total\n percentage = frac * 100\n\n l_bar += '{0:3.0f}%|'.format(percentage)\n\n if ncols == 0:\n return l_bar[:-1] + r_bar[1:]\n\n format_dict.update(l_bar=l_bar)\n if bar_format:\n format_dict.update(percentage=percentage)\n\n # auto-remove colon for empty `{desc}`\n if not prefix:\n bar_format = bar_format.replace(\"{desc}: \", '')\n else:\n bar_format = \"{l_bar}{bar}{r_bar}\"\n\n full_bar = FormatReplace()\n nobar = bar_format.format(bar=full_bar, **format_dict)\n if not full_bar.format_called:\n return nobar # no `{bar}`; nothing else to do\n\n # Formatting progress bar space available for bar's display\n full_bar = Bar(frac,\n max(1, ncols - disp_len(nobar)) if ncols else 10,\n charset=Bar.ASCII if ascii is True else ascii or Bar.UTF,\n colour=colour)\n if not _is_ascii(full_bar.charset) and _is_ascii(bar_format):\n bar_format = str(bar_format)\n res = bar_format.format(bar=full_bar, **format_dict)\n return disp_trim(res, ncols) if ncols else res\n\n elif bar_format:\n # user-specified bar_format but no total\n l_bar += '|'\n format_dict.update(l_bar=l_bar, percentage=0)\n full_bar = FormatReplace()\n nobar = bar_format.format(bar=full_bar, **format_dict)\n if not full_bar.format_called:\n return nobar\n full_bar = Bar(0,\n max(1, ncols - disp_len(nobar)) if ncols else 10,\n charset=Bar.BLANK, colour=colour)\n res = bar_format.format(bar=full_bar, **format_dict)\n return disp_trim(res, ncols) if ncols else res\n else:\n # no total: no progressbar, ETA, just progress stats\n return (f'{(prefix + \": \") if prefix else \"\"}'\n f'{n_fmt}{unit} [{elapsed_str}, {rate_fmt}{postfix}]')\n\n def __new__(cls, *_, **__):\n instance = object.__new__(cls)\n with cls.get_lock(): # also constructs lock if non-existent\n cls._instances.add(instance)\n # create monitoring thread\n if cls.monitor_interval and (cls.monitor is None\n or not cls.monitor.report()):\n try:\n cls.monitor = TMonitor(cls, cls.monitor_interval)\n except Exception as e: # pragma: nocover\n warn(\"tqdm:disabling monitor support\"\n \" (monitor_interval = 0) due to:\\n\" + str(e),\n TqdmMonitorWarning, stacklevel=2)\n cls.monitor_interval = 0\n return instance\n\n @classmethod\n def _get_free_pos(cls, instance=None):\n \"\"\"Skips specified instance.\"\"\"\n positions = {abs(inst.pos) for inst in cls._instances\n if inst is not instance and hasattr(inst, \"pos\")}\n return min(set(range(len(positions) + 1)).difference(positions))\n\n @classmethod\n def _decr_instances(cls, instance):\n \"\"\"\n Remove from list and reposition another unfixed bar\n to fill the new gap.\n\n This means that by default (where all nested bars are 
unfixed),\n order is not maintained but screen flicker/blank space is minimised.\n (tqdm<=4.44.1 moved ALL subsequent unfixed bars up.)\n \"\"\"\n with cls._lock:\n try:\n cls._instances.remove(instance)\n except KeyError:\n # if not instance.gui: # pragma: no cover\n # raise\n pass # py2: maybe magically removed already\n # else:\n if not instance.gui:\n last = (instance.nrows or 20) - 1\n # find unfixed (`pos >= 0`) overflow (`pos >= nrows - 1`)\n instances = list(filter(\n lambda i: hasattr(i, \"pos\") and last <= i.pos,\n cls._instances))\n # set first found to current `pos`\n if instances:\n inst = min(instances, key=lambda i: i.pos)\n inst.clear(nolock=True)\n inst.pos = abs(instance.pos)\n\n @classmethod\n def write(cls, s, file=None, end=\"\\n\", nolock=False):\n \"\"\"Print a message via tqdm (without overlap with bars).\"\"\"\n fp = file if file is not None else sys.stdout\n with cls.external_write_mode(file=file, nolock=nolock):\n # Write the message\n fp.write(s)\n fp.write(end)\n\n @classmethod\n @contextmanager\n def external_write_mode(cls, file=None, nolock=False):\n \"\"\"\n Disable tqdm within context and refresh tqdm when exits.\n Useful when writing to standard output stream\n \"\"\"\n fp = file if file is not None else sys.stdout\n\n try:\n if not nolock:\n cls.get_lock().acquire()\n # Clear all bars\n inst_cleared = []\n for inst in getattr(cls, '_instances', []):\n # Clear instance if in the target output file\n # or if write output + tqdm output are both either\n # sys.stdout or sys.stderr (because both are mixed in terminal)\n if hasattr(inst, \"start_t\") and (inst.fp == fp or all(\n f in (sys.stdout, sys.stderr) for f in (fp, inst.fp))):\n inst.clear(nolock=True)\n inst_cleared.append(inst)\n yield\n # Force refresh display of bars we cleared\n for inst in inst_cleared:\n inst.refresh(nolock=True)\n finally:\n if not nolock:\n cls._lock.release()\n\n @classmethod\n def set_lock(cls, lock):\n \"\"\"Set the global lock.\"\"\"\n cls._lock = lock\n\n @classmethod\n def get_lock(cls):\n \"\"\"Get the global lock. 
Construct it if it does not exist.\"\"\"\n if not hasattr(cls, '_lock'):\n cls._lock = TqdmDefaultWriteLock()\n return cls._lock\n\n @classmethod\n def pandas(cls, **tqdm_kwargs):\n \"\"\"\n Registers the current `tqdm` class with\n pandas.core.\n ( frame.DataFrame\n | series.Series\n | groupby.(generic.)DataFrameGroupBy\n | groupby.(generic.)SeriesGroupBy\n ).progress_apply\n\n A new instance will be created every time `progress_apply` is called,\n and each instance will automatically `close()` upon completion.\n\n Parameters\n ----------\n tqdm_kwargs : arguments for the tqdm instance\n\n Examples\n --------\n >>> import pandas as pd\n >>> import numpy as np\n >>> from tqdm import tqdm\n >>> from tqdm.gui import tqdm as tqdm_gui\n >>>\n >>> df = pd.DataFrame(np.random.randint(0, 100, (100000, 6)))\n >>> tqdm.pandas(ncols=50) # can use tqdm_gui, optional kwargs, etc\n >>> # Now you can use `progress_apply` instead of `apply`\n >>> df.groupby(0).progress_apply(lambda x: x**2)\n\n References\n ----------\n <https://stackoverflow.com/questions/18603270/\\\n progress-indicator-during-pandas-operations-python>\n \"\"\"\n from warnings import catch_warnings, simplefilter\n\n from pandas.core.frame import DataFrame\n from pandas.core.series import Series\n try:\n with catch_warnings():\n simplefilter(\"ignore\", category=FutureWarning)\n from pandas import Panel\n except ImportError: # pandas>=1.2.0\n Panel = None\n Rolling, Expanding = None, None\n try: # pandas>=1.0.0\n from pandas.core.window.rolling import _Rolling_and_Expanding\n except ImportError:\n try: # pandas>=0.18.0\n from pandas.core.window import _Rolling_and_Expanding\n except ImportError: # pandas>=1.2.0\n try: # pandas>=1.2.0\n from pandas.core.window.expanding import Expanding\n from pandas.core.window.rolling import Rolling\n _Rolling_and_Expanding = Rolling, Expanding\n except ImportError: # pragma: no cover\n _Rolling_and_Expanding = None\n try: # pandas>=0.25.0\n from pandas.core.groupby.generic import SeriesGroupBy # , NDFrameGroupBy\n from pandas.core.groupby.generic import DataFrameGroupBy\n except ImportError: # pragma: no cover\n try: # pandas>=0.23.0\n from pandas.core.groupby.groupby import DataFrameGroupBy, SeriesGroupBy\n except ImportError:\n from pandas.core.groupby import DataFrameGroupBy, SeriesGroupBy\n try: # pandas>=0.23.0\n from pandas.core.groupby.groupby import GroupBy\n except ImportError: # pragma: no cover\n from pandas.core.groupby import GroupBy\n\n try: # pandas>=0.23.0\n from pandas.core.groupby.groupby import PanelGroupBy\n except ImportError:\n try:\n from pandas.core.groupby import PanelGroupBy\n except ImportError: # pandas>=0.25.0\n PanelGroupBy = None\n\n tqdm_kwargs = tqdm_kwargs.copy()\n deprecated_t = [tqdm_kwargs.pop('deprecated_t', None)]\n\n def inner_generator(df_function='apply'):\n def inner(df, func, *args, **kwargs):\n \"\"\"\n Parameters\n ----------\n df : (DataFrame|Series)[GroupBy]\n Data (may be grouped).\n func : function\n To be applied on the (grouped) data.\n **kwargs : optional\n Transmitted to `df.apply()`.\n \"\"\"\n\n # Precompute total iterations\n total = tqdm_kwargs.pop(\"total\", getattr(df, 'ngroups', None))\n if total is None: # not grouped\n if df_function == 'applymap':\n total = df.size\n elif isinstance(df, Series):\n total = len(df)\n elif (_Rolling_and_Expanding is None or\n not isinstance(df, _Rolling_and_Expanding)):\n # DataFrame or Panel\n axis = kwargs.get('axis', 0)\n if axis == 'index':\n axis = 0\n elif axis == 'columns':\n axis = 1\n # when axis=0, 
total is shape[axis1]\n total = df.size // df.shape[axis]\n\n # Init bar\n if deprecated_t[0] is not None:\n t = deprecated_t[0]\n deprecated_t[0] = None\n else:\n t = cls(total=total, **tqdm_kwargs)\n\n if len(args) > 0:\n # *args intentionally not supported (see #244, #299)\n TqdmDeprecationWarning(\n \"Except func, normal arguments are intentionally\" +\n \" not supported by\" +\n \" `(DataFrame|Series|GroupBy).progress_apply`.\" +\n \" Use keyword arguments instead.\",\n fp_write=getattr(t.fp, 'write', sys.stderr.write))\n\n try: # pandas>=1.3.0\n from pandas.core.common import is_builtin_func\n except ImportError:\n is_builtin_func = df._is_builtin_func\n try:\n func = is_builtin_func(func)\n except TypeError:\n pass\n\n # Define bar updating wrapper\n def wrapper(*args, **kwargs):\n # update tbar correctly\n # it seems `pandas apply` calls `func` twice\n # on the first column/row to decide whether it can\n # take a fast or slow code path; so stop when t.total==t.n\n t.update(n=1 if not t.total or t.n < t.total else 0)\n return func(*args, **kwargs)\n\n # Apply the provided function (in **kwargs)\n # on the df using our wrapper (which provides bar updating)\n try:\n return getattr(df, df_function)(wrapper, **kwargs)\n finally:\n t.close()\n\n return inner\n\n # Monkeypatch pandas to provide easy methods\n # Enable custom tqdm progress in pandas!\n Series.progress_apply = inner_generator()\n SeriesGroupBy.progress_apply = inner_generator()\n Series.progress_map = inner_generator('map')\n SeriesGroupBy.progress_map = inner_generator('map')\n\n DataFrame.progress_apply = inner_generator()\n DataFrameGroupBy.progress_apply = inner_generator()\n DataFrame.progress_applymap = inner_generator('applymap')\n\n if Panel is not None:\n Panel.progress_apply = inner_generator()\n if PanelGroupBy is not None:\n PanelGroupBy.progress_apply = inner_generator()\n\n GroupBy.progress_apply = inner_generator()\n GroupBy.progress_aggregate = inner_generator('aggregate')\n GroupBy.progress_transform = inner_generator('transform')\n\n if Rolling is not None and Expanding is not None:\n Rolling.progress_apply = inner_generator()\n Expanding.progress_apply = inner_generator()\n elif _Rolling_and_Expanding is not None:\n _Rolling_and_Expanding.progress_apply = inner_generator()\n\n # override defaults via env vars\n @envwrap(\"TQDM_\", is_method=True, types={'total': float, 'ncols': int, 'miniters': float,\n 'position': int, 'nrows': int})\n def __init__(self, iterable=None, desc=None, total=None, leave=True, file=None,\n ncols=None, mininterval=0.1, maxinterval=10.0, miniters=None,\n ascii=None, disable=False, unit='it', unit_scale=False,\n dynamic_ncols=False, smoothing=0.3, bar_format=None, initial=0,\n position=None, postfix=None, unit_divisor=1000, write_bytes=False,\n lock_args=None, nrows=None, colour=None, delay=0.0, gui=False,\n **kwargs):\n \"\"\"see tqdm.tqdm for arguments\"\"\"\n if file is None:\n file = sys.stderr\n\n if write_bytes:\n # Despite coercing unicode into bytes, py2 sys.std* streams\n # should have bytes written to them.\n file = SimpleTextIOWrapper(\n file, encoding=getattr(file, 'encoding', None) or 'utf-8')\n\n file = DisableOnWriteError(file, tqdm_instance=self)\n\n if disable is None and hasattr(file, \"isatty\") and not file.isatty():\n disable = True\n\n if total is None and iterable is not None:\n try:\n total = len(iterable)\n except (TypeError, AttributeError):\n total = None\n if total == float(\"inf\"):\n # Infinite iterations, behave same as unknown\n total = None\n\n if 
disable:\n self.iterable = iterable\n self.disable = disable\n with self._lock:\n self.pos = self._get_free_pos(self)\n self._instances.remove(self)\n self.n = initial\n self.total = total\n self.leave = leave\n return\n\n if kwargs:\n self.disable = True\n with self._lock:\n self.pos = self._get_free_pos(self)\n self._instances.remove(self)\n raise (\n TqdmDeprecationWarning(\n \"`nested` is deprecated and automated.\\n\"\n \"Use `position` instead for manual control.\\n\",\n fp_write=getattr(file, 'write', sys.stderr.write))\n if \"nested\" in kwargs else\n TqdmKeyError(\"Unknown argument(s): \" + str(kwargs)))\n\n # Preprocess the arguments\n if (\n (ncols is None or nrows is None) and (file in (sys.stderr, sys.stdout))\n ) or dynamic_ncols: # pragma: no cover\n if dynamic_ncols:\n dynamic_ncols = _screen_shape_wrapper()\n if dynamic_ncols:\n ncols, nrows = dynamic_ncols(file)\n else:\n _dynamic_ncols = _screen_shape_wrapper()\n if _dynamic_ncols:\n _ncols, _nrows = _dynamic_ncols(file)\n if ncols is None:\n ncols = _ncols\n if nrows is None:\n nrows = _nrows\n\n if miniters is None:\n miniters = 0\n dynamic_miniters = True\n else:\n dynamic_miniters = False\n\n if mininterval is None:\n mininterval = 0\n\n if maxinterval is None:\n maxinterval = 0\n\n if ascii is None:\n ascii = not _supports_unicode(file)\n\n if bar_format and ascii is not True and not _is_ascii(ascii):\n # Convert bar format into unicode since terminal uses unicode\n bar_format = str(bar_format)\n\n if smoothing is None:\n smoothing = 0\n\n # Store the arguments\n self.iterable = iterable\n self.desc = desc or ''\n self.total = total\n self.leave = leave\n self.fp = file\n self.ncols = ncols\n self.nrows = nrows\n self.mininterval = mininterval\n self.maxinterval = maxinterval\n self.miniters = miniters\n self.dynamic_miniters = dynamic_miniters\n self.ascii = ascii\n self.disable = disable\n self.unit = unit\n self.unit_scale = unit_scale\n self.unit_divisor = unit_divisor\n self.initial = initial\n self.lock_args = lock_args\n self.delay = delay\n self.gui = gui\n self.dynamic_ncols = dynamic_ncols\n self.smoothing = smoothing\n self._ema_dn = EMA(smoothing)\n self._ema_dt = EMA(smoothing)\n self._ema_miniters = EMA(smoothing)\n self.bar_format = bar_format\n self.postfix = None\n self.colour = colour\n self._time = time\n if postfix:\n try:\n self.set_postfix(refresh=False, **postfix)\n except TypeError:\n self.postfix = postfix\n\n # Init the iterations counters\n self.last_print_n = initial\n self.n = initial\n\n # if nested, at initial sp() call we replace '\\r' by '\\n' to\n # not overwrite the outer progress bar\n with self._lock:\n # mark fixed positions as negative\n self.pos = self._get_free_pos(self) if position is None else -position\n\n if not gui:\n # Initialize the screen printer\n self.sp = self.status_printer(self.fp)\n if delay <= 0:\n self.refresh(lock_args=self.lock_args)\n\n # Init the time counter\n self.last_print_t = self._time()\n # NB: Avoid race conditions by setting start_t at the very end of init\n self.start_t = self.last_print_t\n\n def __bool__(self):\n if self.total is not None:\n return self.total > 0\n if self.iterable is None:\n raise TypeError('bool() undefined when iterable == total == None')\n return bool(self.iterable)\n\n def __len__(self):\n return (\n self.total if self.iterable is None\n else self.iterable.shape[0] if hasattr(self.iterable, \"shape\")\n else len(self.iterable) if hasattr(self.iterable, \"__len__\")\n else self.iterable.__length_hint__() if 
hasattr(self.iterable, \"__length_hint__\")\n else getattr(self, \"total\", None))\n\n def __reversed__(self):\n try:\n orig = self.iterable\n except AttributeError:\n raise TypeError(\"'tqdm' object is not reversible\")\n else:\n self.iterable = reversed(self.iterable)\n return self.__iter__()\n finally:\n self.iterable = orig\n\n def __contains__(self, item):\n contains = getattr(self.iterable, '__contains__', None)\n return contains(item) if contains is not None else item in self.__iter__()\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n try:\n self.close()\n except AttributeError:\n # maybe eager thread cleanup upon external error\n if (exc_type, exc_value, traceback) == (None, None, None):\n raise\n warn(\"AttributeError ignored\", TqdmWarning, stacklevel=2)\n\n def __del__(self):\n self.close()\n\n def __str__(self):\n return self.format_meter(**self.format_dict)\n\n @property\n def _comparable(self):\n return abs(getattr(self, \"pos\", 1 << 31))\n\n def __hash__(self):\n return id(self)\n\n def __iter__(self):\n \"\"\"Backward-compatibility to use: for x in tqdm(iterable)\"\"\"\n\n # Inlining instance variables as locals (speed optimisation)\n iterable = self.iterable\n\n # If the bar is disabled, then just walk the iterable\n # (note: keep this check outside the loop for performance)\n if self.disable:\n for obj in iterable:\n yield obj\n return\n\n mininterval = self.mininterval\n last_print_t = self.last_print_t\n last_print_n = self.last_print_n\n min_start_t = self.start_t + self.delay\n n = self.n\n time = self._time\n\n try:\n for obj in iterable:\n yield obj\n # Update and possibly print the progressbar.\n # Note: does not call self.update(1) for speed optimisation.\n n += 1\n\n if n - last_print_n >= self.miniters:\n cur_t = time()\n dt = cur_t - last_print_t\n if dt >= mininterval and cur_t >= min_start_t:\n self.update(n - last_print_n)\n last_print_n = self.last_print_n\n last_print_t = self.last_print_t\n finally:\n self.n = n\n self.close()\n\n def update(self, n=1):\n \"\"\"\n Manually update the progress bar, useful for streams\n such as reading files.\n E.g.:\n >>> t = tqdm(total=filesize) # Initialise\n >>> for current_buffer in stream:\n ... ...\n ... t.update(len(current_buffer))\n >>> t.close()\n The last line is highly recommended, but possibly not necessary if\n `t.update()` will be called in such a way that `filesize` will be\n exactly reached and printed.\n\n Parameters\n ----------\n n : int or float, optional\n Increment to add to the internal counter of iterations\n [default: 1]. 
If using float, consider specifying `{n:.3f}`\n or similar in `bar_format`, or specifying `unit_scale`.\n\n Returns\n -------\n out : bool or None\n True if a `display()` was triggered.\n \"\"\"\n if self.disable:\n return\n\n if n < 0:\n self.last_print_n += n # for auto-refresh logic to work\n self.n += n\n\n # check counter first to reduce calls to time()\n if self.n - self.last_print_n >= self.miniters:\n cur_t = self._time()\n dt = cur_t - self.last_print_t\n if dt >= self.mininterval and cur_t >= self.start_t + self.delay:\n cur_t = self._time()\n dn = self.n - self.last_print_n # >= n\n if self.smoothing and dt and dn:\n # EMA (not just overall average)\n self._ema_dn(dn)\n self._ema_dt(dt)\n self.refresh(lock_args=self.lock_args)\n if self.dynamic_miniters:\n # If no `miniters` was specified, adjust automatically to the\n # maximum iteration rate seen so far between two prints.\n # e.g.: After running `tqdm.update(5)`, subsequent\n # calls to `tqdm.update()` will only cause an update after\n # at least 5 more iterations.\n if self.maxinterval and dt >= self.maxinterval:\n self.miniters = dn * (self.mininterval or self.maxinterval) / dt\n elif self.smoothing:\n # EMA miniters update\n self.miniters = self._ema_miniters(\n dn * (self.mininterval / dt if self.mininterval and dt\n else 1))\n else:\n # max iters between two prints\n self.miniters = max(self.miniters, dn)\n\n # Store old values for next call\n self.last_print_n = self.n\n self.last_print_t = cur_t\n return True\n\n def close(self):\n \"\"\"Cleanup and (if leave=False) close the progressbar.\"\"\"\n if self.disable:\n return\n\n # Prevent multiple closures\n self.disable = True\n\n # decrement instance pos and remove from internal set\n pos = abs(self.pos)\n self._decr_instances(self)\n\n if self.last_print_t < self.start_t + self.delay:\n # haven't ever displayed; nothing to clear\n return\n\n # GUI mode\n if getattr(self, 'sp', None) is None:\n return\n\n # annoyingly, _supports_unicode isn't good enough\n def fp_write(s):\n self.fp.write(str(s))\n\n try:\n fp_write('')\n except ValueError as e:\n if 'closed' in str(e):\n return\n raise # pragma: no cover\n\n leave = pos == 0 if self.leave is None else self.leave\n\n with self._lock:\n if leave:\n # stats for overall rate (no weighted average)\n self._ema_dt = lambda: None\n self.display(pos=0)\n fp_write('\\n')\n else:\n # clear previous display\n if self.display(msg='', pos=pos) and not pos:\n fp_write('\\r')\n\n def clear(self, nolock=False):\n \"\"\"Clear current bar display.\"\"\"\n if self.disable:\n return\n\n if not nolock:\n self._lock.acquire()\n pos = abs(self.pos)\n if pos < (self.nrows or 20):\n self.moveto(pos)\n self.sp('')\n self.fp.write('\\r') # place cursor back at the beginning of line\n self.moveto(-pos)\n if not nolock:\n self._lock.release()\n\n def refresh(self, nolock=False, lock_args=None):\n \"\"\"\n Force refresh the display of this bar.\n\n Parameters\n ----------\n nolock : bool, optional\n If `True`, does not lock.\n If [default: `False`]: calls `acquire()` on internal lock.\n lock_args : tuple, optional\n Passed to internal lock's `acquire()`.\n If specified, will only `display()` if `acquire()` returns `True`.\n \"\"\"\n if self.disable:\n return\n\n if not nolock:\n if lock_args:\n if not self._lock.acquire(*lock_args):\n return False\n else:\n self._lock.acquire()\n self.display()\n if not nolock:\n self._lock.release()\n return True\n\n def unpause(self):\n \"\"\"Restart tqdm timer from last print time.\"\"\"\n if self.disable:\n 
return\n cur_t = self._time()\n self.start_t += cur_t - self.last_print_t\n self.last_print_t = cur_t\n\n def reset(self, total=None):\n \"\"\"\n Resets to 0 iterations for repeated use.\n\n Consider combining with `leave=True`.\n\n Parameters\n ----------\n total : int or float, optional. Total to use for the new bar.\n \"\"\"\n self.n = 0\n if total is not None:\n self.total = total\n if self.disable:\n return\n self.last_print_n = 0\n self.last_print_t = self.start_t = self._time()\n self._ema_dn = EMA(self.smoothing)\n self._ema_dt = EMA(self.smoothing)\n self._ema_miniters = EMA(self.smoothing)\n self.refresh()\n\n def set_description(self, desc=None, refresh=True):\n \"\"\"\n Set/modify description of the progress bar.\n\n Parameters\n ----------\n desc : str, optional\n refresh : bool, optional\n Forces refresh [default: True].\n \"\"\"\n self.desc = desc + ': ' if desc else ''\n if refresh:\n self.refresh()\n\n def set_description_str(self, desc=None, refresh=True):\n \"\"\"Set/modify description without ': ' appended.\"\"\"\n self.desc = desc or ''\n if refresh:\n self.refresh()\n\n def set_postfix(self, ordered_dict=None, refresh=True, **kwargs):\n \"\"\"\n Set/modify postfix (additional stats)\n with automatic formatting based on datatype.\n\n Parameters\n ----------\n ordered_dict : dict or OrderedDict, optional\n refresh : bool, optional\n Forces refresh [default: True].\n kwargs : dict, optional\n \"\"\"\n # Sort in alphabetical order to be more deterministic\n postfix = OrderedDict([] if ordered_dict is None else ordered_dict)\n for key in sorted(kwargs.keys()):\n postfix[key] = kwargs[key]\n # Preprocess stats according to datatype\n for key in postfix.keys():\n # Number: limit the length of the string\n if isinstance(postfix[key], Number):\n postfix[key] = self.format_num(postfix[key])\n # Else for any other type, try to get the string conversion\n elif not isinstance(postfix[key], str):\n postfix[key] = str(postfix[key])\n # Else if it's a string, don't need to preprocess anything\n # Stitch together to get the final postfix\n self.postfix = ', '.join(key + '=' + postfix[key].strip()\n for key in postfix.keys())\n if refresh:\n self.refresh()\n\n def set_postfix_str(self, s='', refresh=True):\n \"\"\"\n Postfix without dictionary expansion, similar to prefix handling.\n \"\"\"\n self.postfix = str(s)\n if refresh:\n self.refresh()\n\n def moveto(self, n):\n # TODO: private method\n self.fp.write('\\n' * n + _term_move_up() * -n)\n getattr(self.fp, 'flush', lambda: None)()\n\n @property\n def format_dict(self):\n \"\"\"Public API for read-only member access.\"\"\"\n if self.disable and not hasattr(self, 'unit'):\n return defaultdict(lambda: None, {\n 'n': self.n, 'total': self.total, 'elapsed': 0, 'unit': 'it'})\n if self.dynamic_ncols:\n self.ncols, self.nrows = self.dynamic_ncols(self.fp)\n return {\n 'n': self.n, 'total': self.total,\n 'elapsed': self._time() - self.start_t if hasattr(self, 'start_t') else 0,\n 'ncols': self.ncols, 'nrows': self.nrows, 'prefix': self.desc,\n 'ascii': self.ascii, 'unit': self.unit, 'unit_scale': self.unit_scale,\n 'rate': self._ema_dn() / self._ema_dt() if self._ema_dt() else None,\n 'bar_format': self.bar_format, 'postfix': self.postfix,\n 'unit_divisor': self.unit_divisor, 'initial': self.initial,\n 'colour': self.colour}\n\n def display(self, msg=None, pos=None):\n \"\"\"\n Use `self.sp` to display `msg` in the specified `pos`.\n\n Consider overloading this function when inheriting to use e.g.:\n 
`self.some_frontend(**self.format_dict)` instead of `self.sp`.\n\n Parameters\n ----------\n msg : str, optional. What to display (default: `repr(self)`).\n pos : int, optional. Position to `moveto`\n (default: `abs(self.pos)`).\n \"\"\"\n if pos is None:\n pos = abs(self.pos)\n\n nrows = self.nrows or 20\n if pos >= nrows - 1:\n if pos >= nrows:\n return False\n if msg or msg is None: # override at `nrows - 1`\n msg = \" ... (more hidden) ...\"\n\n if not hasattr(self, \"sp\"):\n raise TqdmDeprecationWarning(\n \"Please use `tqdm.gui.tqdm(...)`\"\n \" instead of `tqdm(..., gui=True)`\\n\",\n fp_write=getattr(self.fp, 'write', sys.stderr.write))\n\n if pos:\n self.moveto(pos)\n self.sp(self.__str__() if msg is None else msg)\n if pos:\n self.moveto(-pos)\n return True\n\n @classmethod\n @contextmanager\n def wrapattr(cls, stream, method, total=None, bytes=True, **tqdm_kwargs):\n \"\"\"\n stream : file-like object.\n method : str, \"read\" or \"write\". The result of `read()` and\n the first argument of `write()` should have a `len()`.\n\n >>> with tqdm.wrapattr(file_obj, \"read\", total=file_obj.size) as fobj:\n ... while True:\n ... chunk = fobj.read(chunk_size)\n ... if not chunk:\n ... break\n \"\"\"\n with cls(total=total, **tqdm_kwargs) as t:\n if bytes:\n t.unit = \"B\"\n t.unit_scale = True\n t.unit_divisor = 1024\n yield CallbackIOWrapper(t.update, stream, method)" } ]
import warnings from .std import TqdmExperimentalWarning from .autonotebook import tqdm as notebook_tqdm from .asyncio import tqdm as asyncio_tqdm from .std import tqdm as std_tqdm
12,682
""" Enables multiple commonly used features. Method resolution order: - `tqdm.autonotebook` without import warnings - `tqdm.asyncio` - `tqdm.std` base class Usage: >>> from tqdm.auto import trange, tqdm >>> for i in trange(10): ... ... """ with warnings.catch_warnings(): warnings.simplefilter("ignore", category=TqdmExperimentalWarning) if notebook_tqdm != std_tqdm:
""" Enables multiple commonly used features. Method resolution order: - `tqdm.autonotebook` without import warnings - `tqdm.asyncio` - `tqdm.std` base class Usage: >>> from tqdm.auto import trange, tqdm >>> for i in trange(10): ... ... """ with warnings.catch_warnings(): warnings.simplefilter("ignore", category=TqdmExperimentalWarning) if notebook_tqdm != std_tqdm:
class tqdm(notebook_tqdm, asyncio_tqdm): # pylint: disable=inconsistent-mro
0
2023-12-24 15:46:18+00:00
16k
pkariz/grin-explorer
backend/api/views.py
[ { "identifier": "fetch_and_store_block", "path": "backend/api/bootstrap.py", "snippet": "def fetch_and_store_block(blockchain, block_height, prefetch=True):\n # initialize node api\n node_api = NodeV2API(blockchain.node)\n if block_height < 0:\n # no such block height\n raise NodeBlockNotFoundException()\n if prefetch:\n block_data = get_prefetched_header_and_block_data(blockchain.node, block_height)\n else:\n block_data = node_api.get_block(height=block_height)\n header_data = block_data['header']\n timestamp = parse_datetime(header_data['timestamp'])\n hash = header_data['hash']\n # create header instance\n cuckoo_solution = ','.join(map(str, header_data['cuckoo_solution']))\n with transaction.atomic():\n header, header_created = BlockHeader.objects.get_or_create(\n blockchain=blockchain,\n cuckoo_solution=cuckoo_solution,\n kernel_root=header_data['kernel_root'],\n defaults={\n 'version': header_data['version'],\n 'output_root': header_data['output_root'],\n 'range_proof_root': header_data['range_proof_root'],\n 'kernel_mmr_size': header_data['kernel_mmr_size'],\n 'output_mmr_size': header_data['output_mmr_size'],\n 'nonce': str(header_data['nonce']),\n 'edge_bits': header_data['edge_bits'],\n 'secondary_scaling': header_data['secondary_scaling'],\n 'total_difficulty': header_data['total_difficulty'],\n 'total_kernel_offset': header_data['total_kernel_offset'],\n }\n )\n # create block instance\n try:\n block, block_created = Block.objects.get_or_create(\n blockchain=blockchain,\n hash=hash,\n height=block_height,\n timestamp=timestamp,\n header=header,\n prev_hash=block_data['header']['previous'],\n reorg=None,\n nr_inputs=len(block_data['inputs']),\n nr_outputs=len(block_data['outputs']),\n nr_kernels=len(block_data['kernels']),\n )\n except IntegrityError as e:\n # race condition so it's a duplicate. 
We can skip creation process\n # and just return the block that we already have\n return Block.objects.get(blockchain=blockchain, hash=hash)\n\n if not block_created:\n # we have already fetched all the data since it's done in an atomic\n # transaction, so skip unnecessary work\n return block\n\n # bulk create kernels\n kernels = []\n for kernel_data in block_data['kernels']:\n kernels.append(\n Kernel(\n block=block,\n features=kernel_data['features'],\n fee=kernel_data['fee'],\n fee_shift=kernel_data['fee_shift'],\n lock_height=kernel_data['lock_height'],\n excess=kernel_data['excess'],\n excess_sig=kernel_data['excess_sig'],\n )\n )\n Kernel.objects.bulk_create(kernels)\n\n inputs = []\n # create input instances\n outputs_data = Output.objects\\\n .filter(\n commitment__in=block_data['inputs'],\n block__reorg__isnull=True,\n block__blockchain=block.blockchain,\n )\\\n .values('id', 'commitment')\n outputs_mapper = { output_data['commitment'] : output_data['id'] for output_data in outputs_data }\n for input_data in block_data['inputs']:\n inputs.append(\n Input(\n block=block,\n commitment=input_data,\n output_id=outputs_mapper.get(input_data),\n )\n )\n Input.objects.bulk_create(inputs)\n # mark the corresponding outputs as spent, but only on the main chain so\n # that we don't corrupt the reorged data\n Output.objects.filter(pk__in=outputs_mapper.values()).update(spent=True)\n\n # create output instances\n outputs = []\n inputs = Input.objects\\\n .filter(\n commitment__in=list(map(lambda x: x['commit'], block_data['outputs'])),\n block__reorg__isnull=True,\n block__blockchain=block.blockchain,\n )\n inputs_mapper = { input.commitment : input for input in inputs }\n for output_data in block_data['outputs']:\n outputs.append(\n Output(\n block=block,\n output_type=output_data['output_type'],\n commitment=output_data['commit'],\n spent=output_data['spent'],\n proof=output_data['proof'],\n proof_hash=output_data['proof_hash'],\n merkle_proof=output_data['merkle_proof'],\n mmr_index=output_data['mmr_index'],\n )\n )\n outputs = Output.objects.bulk_create(outputs)\n # link inputs to created outputs, but only on the main chain so that we\n # don't corrupt the reorged data\n fixed_inputs = []\n for output in outputs:\n matching_input = inputs_mapper.get(output.commitment)\n if matching_input:\n matching_input.output = output\n fixed_inputs.append(matching_input)\n Input.objects.bulk_update(fixed_inputs, ['output'])\n return block" }, { "identifier": "update_blockchain_progress", "path": "backend/api/bootstrap.py", "snippet": "def update_blockchain_progress(blockchain):\n try:\n start_height, end_height = blockchain.get_bootstrap_heights()\n except Exception as e:\n logger.warning(\n 'Failed to get bootstrap heights',\n extra={ 'blockchain': blockchain.slug },\n )\n raise UpdateBlockchainProgressError(blockchain.slug)\n expected_heights = set(range(start_height, end_height + 1))\n existing_heights = set(list(\n blockchain.blocks\\\n .filter(reorg__isnull=True)\\\n .values_list('height', flat=True)\n ))\n missing_heights = expected_heights - existing_heights\n update_load_progress(\n blockchain, \n len(missing_heights),\n end_height - start_height + 1,\n 1,\n 1,\n 2,\n verbose=True\n )" }, { "identifier": "UpdateBlockchainProgressError", "path": "backend/api/exceptions.py", "snippet": "class UpdateBlockchainProgressError(Exception):\n pass" }, { "identifier": "get_filter_backends", "path": "backend/api/helpers.py", "snippet": "def get_filter_backends(replacements):\n \"\"\"\n Returns a tuple of 
filter backends where default ones, from DefaultMixin,\n are replaced with the given replacements.\n\n Args:\n replacements: dict where key is an existing filter backend class's\n __name__ and value is its replacement filter backend class\n \"\"\"\n current_filters = DefaultMixin.filter_backends\n return tuple([\n filter if filter.__name__ not in replacements else replacements[filter.__name__]\n for filter in list(current_filters)\n ])" }, { "identifier": "load_data_from_redis", "path": "backend/api/helpers.py", "snippet": "def load_data_from_redis(redis_key):\n r = redis.Redis(host='redis')\n data = r.get(redis_key)\n if data is None:\n return\n return json.loads(data)" }, { "identifier": "BlockFilter", "path": "backend/api/filters.py", "snippet": "class BlockFilter(filters.FilterSet):\n class Meta:\n model = Block\n fields = ('blockchain', 'height', 'hash')" }, { "identifier": "CustomBlockSearchFilter", "path": "backend/api/filters.py", "snippet": "class CustomBlockSearchFilter(DRFfilters.SearchFilter):\n \"\"\"\n Alongside the given search_fields this filter filters also by:\n - keyword 'reorgs' --> return only blocks where reorgs happened\n - ['inputs', 'outputs', 'kernels'] ['=', '<', '>', '<=', '>='] [value] -->\n return only blocks matching this computation, eg: 'inputs > 2'\n You cannot combine different types of search (eg. 'reorgs' + 'computation')\n \"\"\"\n\n def filter_queryset(self, request, queryset, view):\n queryset = super().filter_queryset(request, queryset, view)\n blockchain_slug = view.kwargs['blockchain_slug']\n original_search_terms = self.get_search_terms(request)\n search_terms = self._get_normalized_search_terms(original_search_terms)\n if len(search_terms) == 0:\n # searches:\n # - height --> add filter reorg=None\n # - hash --> nothing to add\n # - outputhash --> add filter reorg=None\n # - block-detail --> nothing to add\n # - block-list --> add filter reorg=None\n if len(original_search_terms) > 1:\n raise APIException('Too many standard search terms')\n if not original_search_terms:\n # it's either an unfiltered block-list or block-detail\n if view.action == 'list':\n queryset = queryset.filter(reorg=None)\n else:\n # there's only 1 original search term, figure out which one\n if len(original_search_terms[0]) != 64:\n # it's not block hash but either block height or output hash\n # in both cases we need to filter out reorgs\n queryset = queryset.filter(reorg=None)\n return queryset\n searched_types = set(map(lambda x: x['type'], search_terms))\n if len(searched_types) > 1:\n raise APIException('Cannot combine different types of searches')\n if searched_types == { 'reorgs' }:\n return self._get_reorgs_qs(blockchain_slug)\n elif searched_types == { 'computation' }:\n return self._get_computations_qs(search_terms, blockchain_slug)\n elif searched_types == { 'hash' }:\n return self._get_hash_qs(search_terms[0]['value'], blockchain_slug, queryset)\n elif searched_types == { 'height' }:\n return self._get_height_qs(search_terms[0]['value'], blockchain_slug)\n elif searched_types == { 'kernel_or_output' }:\n return self._get_kernel_or_output_qs(\n search_terms[0]['value'], blockchain_slug)\n else:\n logger.exception(\n 'Invalid search terms',\n exc_info=e,\n extra={'search_terms': search_terms}\n )\n raise APIException('Invalid search terms')\n\n def _get_normalized_search_terms(self, search_terms):\n \"\"\"\n Search terms of format ['outputs>1'] are not supported. Instead, the\n operators should be surrounded by spaces, eg. 
['outputs', '>', '1'].\n Supported operators are ['=', '>', '<', '<=', '>=']\n \"\"\"\n supported_operators = ['=', '>', '<', '<=', '>=']\n normalized_terms = []\n i = 0\n while i <= len(search_terms) - 1:\n if isinstance(search_terms[i], str) and search_terms[i].lower() in ['inputs', 'outputs', 'kernels']:\n operator = search_terms[i+1]\n if operator not in supported_operators:\n raise APIException('Invalid search operator')\n value = int(search_terms[i+2])\n if value < 0:\n raise APIException('Invalid search computation')\n normalized_terms.append({\n 'type': 'computation',\n 'source': search_terms[i],\n 'op': operator,\n 'value': value,\n })\n i += 3\n elif isinstance(search_terms[i], str) and search_terms[i].lower() == 'reorgs':\n normalized_terms.append({ 'type': 'reorgs' })\n i += 1\n elif len(search_terms[i]) == 64:\n # hash\n normalized_terms.append({\n 'type': 'hash',\n 'value': search_terms[i],\n })\n i += 1\n elif len(search_terms[i]) == 66:\n # kernel excess or output commitment\n normalized_terms.append({\n 'type': 'kernel_or_output',\n 'value': search_terms[i],\n })\n i += 1\n else:\n try:\n value = int(search_terms[i])\n except ValueError:\n value = None\n if value >= 0:\n normalized_terms.append({\n 'type': 'height',\n 'value': value,\n })\n i += 1\n else:\n # term which is not for this custom search, eg. block hash\n i += 1\n return normalized_terms\n\n def _get_reorgs_qs(self, blockchain_slug):\n # NOTE: we first filter, then calculate reorg_len on filtered data and\n # then filter on annotated data that we've calculated\n reorg_heights = list(Reorg.objects\\\n .select_related('start_main_block')\\\n .filter(\n blockchain__slug=blockchain_slug,\n start_main_block__reorg=None,\n )\\\n .annotate(reorg_len=F('end_reorg_block__height') - F('start_reorg_block__height') + 1)\\\n .filter(reorg_len__gte=settings.MIN_REORG_LEN)\\\n .values_list('start_main_block__height', flat=True)\n )\n queryset = Block.objects\\\n .filter(\n blockchain__slug=blockchain_slug,\n reorg=None,\n height__in=reorg_heights,\n )\\\n .order_by('-height')\n return queryset\n\n def _get_hash_qs(self, hash, blockchain_slug, queryset):\n return queryset.filter(\n blockchain__slug=blockchain_slug,\n hash=hash,\n )\n\n def _get_height_qs(self, height, blockchain_slug):\n return Block.objects.filter(\n blockchain__slug=blockchain_slug,\n height=height,\n )\n\n def _get_kernel_or_output_qs(self, kernel_or_output, blockchain_slug):\n kernel = Kernel.objects.filter(\n excess=kernel_or_output,\n block__blockchain__slug=blockchain_slug,\n ).first()\n if kernel:\n return Block.objects.filter(hash=kernel.block.hash)\n output = Output.objects.filter(\n commitment=kernel_or_output,\n block__blockchain__slug=blockchain_slug,\n ).first()\n if output:\n return Block.objects.filter(hash=output.block.hash)\n return Block.objects.none()\n\n def _get_computations_qs(self, search_terms, blockchain_slug):\n operator_mapping = {\n '=': '',\n '>': '__gt',\n '<': '__lt',\n '<=': '__lte',\n '>=': '__gte',\n }\n possible_sources = ['inputs', 'outputs', 'kernels']\n searched_sources = set(map(lambda x: x['source'], search_terms))\n op_searched_types = set(possible_sources) & set(searched_sources)\n op_qs = Blockchain.objects.get(slug=blockchain_slug).blocks.all()\n for search_term in search_terms:\n filters = {\n 'blockchain__slug': blockchain_slug,\n 'reorg': None,\n }\n op_map = operator_mapping[search_term['op']]\n filters[f'nr_{search_term[\"source\"]}{op_map}'] = search_term['value']\n op_qs = 
op_qs.filter(**filters).order_by('-height')\n return op_qs" }, { "identifier": "NodeFilter", "path": "backend/api/filters.py", "snippet": "class NodeFilter(filters.FilterSet):\n class Meta:\n model = Node\n fields = ('name', 'slug', 'archive')" }, { "identifier": "NodeGroupFilter", "path": "backend/api/filters.py", "snippet": "class NodeGroupFilter(filters.FilterSet):\n class Meta:\n model = NodeGroup\n fields = ('name', 'slug')" }, { "identifier": "CustomModelViewSet", "path": "backend/api/mixins.py", "snippet": "class CustomModelViewSet(\n DefaultMixin,\n viewsets.ModelViewSet\n):\n \"\"\"Default viewset for models.\"\"\"\n pass" }, { "identifier": "Blockchain", "path": "backend/api/models.py", "snippet": "class Blockchain(TimeStampedModel):\n id = models.BigAutoField(primary_key=True)\n # testnet, mainnet etc\n name = models.CharField(max_length=255, unique=True)\n # slug of the name, we use it in url\n slug = models.SlugField(max_length=255, unique=True)\n # node from which the data is fetched\n node = models.ForeignKey(\n Node, related_name='blockchains', on_delete=models.PROTECT)\n # the default blockchain will be picked on the gui by default\n default = models.BooleanField(default=False)\n # if fetch_price is False then the shown price will always be 0.\n # Testnets and localnets should have this set to false.\n fetch_price = models.BooleanField(default=True)\n # load_progress shows current % of loaded blocks. If archive is True then\n # load_progress will represent % of missing all blocks, otherwise % of\n # missing blocks from the latest 1440 blocks\n load_progress = models.DecimalField(\n max_digits=5,\n decimal_places=2,\n default=0.0,\n validators=[MinValueValidator(0), MaxValueValidator(100)]\n )\n\n def __str__(self):\n return f'{self.name} - {self.load_progress} [Node<{self.node}>]'\n\n def bootstrap(self, skip_reorg_check=False):\n # import here to avoid cyclic import\n from .bootstrap import load_blocks\n\n start_height, end_height = self.get_bootstrap_heights()\n load_blocks(self, start_height, end_height, skip_reorg_check)\n\n def get_tip_height(self):\n node_api = NodeV2API(self.node)\n try:\n end_block = node_api.get_tip()['height']\n except NodeError as e:\n logger.exception('Bootstrap failed - failed to get node tip')\n raise e\n return end_block\n\n def get_progress_decimal_places(self):\n if self.node.archive:\n return 2\n return 0\n\n def get_bootstrap_heights(self):\n node_api = NodeV2API(self.node)\n end_height = self.get_tip_height()\n try:\n start_height = node_api.get_blocks(0, end_height, 1, False)['blocks'][0]['header']['height']\n except IndexError:\n raise Exception('Node has no blocks.')\n except NodeError as e:\n logger.exception('Bootstrap failed - failed to get first block height')\n raise e\n return start_height, end_height\n\n def save(self, *args, **kwargs):\n if not self.slug:\n self.slug = slugify(self.name, to_lower=True)\n else:\n self.slug = self.slug.lower()\n if self.default:\n # set other blockchain.default to False\n other_blockchains = Blockchain.objects.all()\n if self.pk:\n other_blockchains = other_blockchains.exclude(pk=self.pk)\n other_blockchains.update(default=False)\n # blockchain doesn't change much so this call doesn't hurt\n old_instance = Blockchain.objects.get(pk=self.pk) if self.pk else None\n res = super().save(*args, **kwargs)\n if old_instance and self.load_progress != old_instance.load_progress:\n # load progress changed, send info\n async_to_sync(get_channel_layer().group_send)(\n 'admin_group',\n {\n 'type': 
'blockchain_progress_changed',\n 'message': {\n 'slug': self.slug,\n # convert to float since Decimal is not serializable\n 'load_progress': float(self.load_progress),\n },\n }\n )\n return res\n\n def full_print(self):\n \"\"\"Used for developing and debugging.\"\"\"\n print('MAIN CHAIN:')\n for block in self.blocks.filter(reorg=None).order_by('height'):\n print(' --> ' + block.hash)\n for reorg in Reorg.objects.filter(blockchain=self):\n print('REORG:')\n for block in Block.objects.filter(reorg=reorg).order_by('height'):\n print(' --> ' + block.hash)\n print('------------------------------------------------------')\n\n def reset(self):\n \"\"\"Used for developing and debugging.\"\"\"\n from .models import Block, BlockHeader, Input, Output, Kernel, DramatiqTask, Reorg\n from django.contrib.contenttypes.models import ContentType\n from decimal import Decimal\n\n Input.objects.filter(block__blockchain=self).delete()\n Output.objects.filter(block__blockchain=self).delete()\n Kernel.objects.filter(block__blockchain=self).delete()\n self.reorgs.all().delete()\n\n content_type = ContentType.objects.get_for_model(self)\n DramatiqTask.objects.filter(\n content_type=content_type,\n object_id=self.id,\n ).delete()\n # removing header will also remove the block\n BlockHeader.objects.filter(block__blockchain=self).delete()\n self.load_progress = Decimal('0')\n self.save()" }, { "identifier": "Block", "path": "backend/api/models.py", "snippet": "class Block(TimeStampedModel):\n blockchain = models.ForeignKey(\n Blockchain, related_name='blocks', on_delete=models.CASCADE)\n hash = models.CharField(\n primary_key=True,\n max_length=64,\n validators=[MinLengthValidator(64)],\n db_index=True,\n )\n height = models.PositiveIntegerField(db_index=True)\n timestamp = models.DateTimeField(db_index=True)\n header = models.ForeignKey(\n 'BlockHeader', related_name='block', on_delete=models.CASCADE)\n prev_hash = models.CharField(\n max_length=64,\n null=True,\n blank=True,\n validators=[MinLengthValidator(64)],\n )\n nr_inputs = models.PositiveIntegerField(default=0)\n nr_outputs = models.PositiveIntegerField(default=0)\n nr_kernels = models.PositiveIntegerField(default=0)\n # when reorg is set it means this block is part of a reorg and not the main\n # chain\n reorg = models.ForeignKey(\n 'Reorg', null=True, related_name='blocks', on_delete=models.CASCADE)\n\n def __str__(self):\n suffix = ''\n if self.reorg:\n suffix = ' Reorged: {}'.format(self.reorg.id)\n return '{}: {} (prev: {})'.format(\n self.height, self.hash, self.prev_hash)\n\n def get_next_block(self):\n return Block.objects.filter(prev_hash=self.hash).first()\n\n def get_previous_block(self):\n return Block.objects.filter(hash=self.prev_hash).first()\n\n def full_print(self, prefix=''):\n \"\"\"Used for developing and debugging.\"\"\"\n print('---------------------------------------------------------------')\n print(f'{prefix}Block {self.height}: {self.hash}, reorg: {self.reorg}')\n print(f'{prefix} INPUTS:')\n for input in self.inputs.all():\n print(f'{prefix} {input}, output: {input.output}')\n print(f'{prefix} OUTPUTS:')\n for output in self.outputs.all():\n print(f'{prefix} {output}')\n print(f'{prefix} KERNELS:')\n for kernel in self.kernels.all():\n print(f'{prefix} {kernel}')\n print('---------------------------------------------------------------')" }, { "identifier": "Reorg", "path": "backend/api/models.py", "snippet": "class Reorg(TimeStampedModel):\n id = models.BigAutoField(primary_key=True)\n blockchain = models.ForeignKey(\n 
Blockchain, related_name='reorgs', on_delete=models.CASCADE)\n # start_reorg_block and end_reorg_block define starting and ending block,\n # which were reorged\n start_reorg_block = models.ForeignKey(\n Block, related_name='start_reorgs', on_delete=models.CASCADE)\n end_reorg_block = models.ForeignKey(\n Block, related_name='end_reorgs', on_delete=models.CASCADE)\n # start_main_block defines starting block which is the new start of the main\n # chain - the block that replaced start_reorg_block. We usually don't know\n # which the ending block is when we spot the reorg, so we don't store it\n # (we don't even have it in DB at that time yet since we usually get them\n # incrementally in the order they're accepted).\n start_main_block = models.ForeignKey(\n Block, related_name='start_mains', on_delete=models.CASCADE)\n\n def __str__(self):\n return '{}: start: {}, end: {}'.format(\n self.blockchain.slug, self.start_reorg_block, self.end_reorg_block)" }, { "identifier": "Node", "path": "backend/api/models.py", "snippet": "class Node(TimeStampedModel):\n \"\"\"Node on the network. Currently it only supports grin-rust.\"\"\"\n id = models.BigAutoField(primary_key=True)\n # name can be whatever\n name = models.CharField(max_length=255, unique=True)\n # by default that's slug of the name\n slug = models.SlugField(max_length=255, unique=True)\n group = models.ForeignKey(\n NodeGroup, related_name='nodes', on_delete=models.PROTECT)\n # foreign api url of the grin-rust node\n api_url = models.URLField()\n # username of the grin-rust node\n api_username = models.CharField(max_length=255)\n # foreign api secret of the grin-rust node\n api_password = models.CharField(max_length=255)\n # if archive is true then we fetch every block when we bootstrap, otherwise\n # we fetch only latest 1440 blocks (1 day)\n archive = models.BooleanField(default=False)\n\n def __str__(self):\n repr = f'{self.name}'\n if self.archive:\n repr += ' (archive)'\n return repr\n\n def save(self, *args, **kwargs):\n if not self.slug:\n self.slug = slugify(self.name, to_lower=True)\n else:\n self.slug = self.slug.lower()\n return super().save(*args, **kwargs)\n\n def is_reachable(self):\n try:\n NodeV2API(self).get_tip()\n return True\n except (\n RequestsConnectionError,\n RequestsTimeout,\n RequestsHTTPError,\n RequestsReadTimeout\n ):\n logger.exception('Node unreachable', extra={'node': self.slug})\n return False" }, { "identifier": "NodeGroup", "path": "backend/api/models.py", "snippet": "class NodeGroup(models.Model):\n \"\"\"\n NodeGroup represents a group of nodes. 
These nodes should be on the same\n network.:\n \"\"\"\n id = models.BigAutoField(primary_key=True)\n # name is probably mainnet, testnet or smth similar\n name = models.CharField(max_length=255, unique=True)\n # by default that's slug of the name\n slug = models.SlugField(max_length=255, unique=True)\n\n def __str__(self):\n return self.name\n\n def save(self, *args, **kwargs):\n if not self.slug:\n self.slug = slugify(self.name, to_lower=True)\n else:\n self.slug = self.slug.lower()\n self.full_clean()\n return super().save(*args, **kwargs)" }, { "identifier": "DramatiqTask", "path": "backend/api/models.py", "snippet": "class DramatiqTask(TimeStampedModel):\n \"\"\"We store task's message_id so that we can abort the task.\"\"\"\n\n class Type(models.TextChoices):\n BOOTSTRAP = 'bootstrap', 'Bootstrap'\n BLOCKCHAIN_DELETE = 'blockchain_delete', 'Blockchain delete'\n\n class Status(models.TextChoices):\n # NOTE: IN_PROGRESS doesn't really mean it's already in progress, just\n # that it has been sent\n IN_PROGRESS = 'in_progress', 'In progress'\n SKIPPED = 'skipped', 'Skipped'\n SUCCESS = 'success', 'Success'\n FAILURE = 'failure', 'Failure'\n\n id = models.BigAutoField(primary_key=True)\n message_id = models.CharField(max_length=255, unique=True)\n # type tells us what this task is doing, eg. 'bootstrap'\n type = models.CharField(max_length=255, choices=Type.choices)\n status = models.CharField(max_length=255, choices=Status.choices)\n # failure_reason should be short and concise\n failure_reason = models.TextField(null=True, default=None)\n content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)\n object_id = models.PositiveIntegerField()\n content_object = GenericForeignKey('content_type', 'object_id')\n\n def save(self, *args, **kwargs):\n from .serializers import DramatiqTaskSerializer\n old_instance = DramatiqTask.objects.get(pk=self.pk) if self.pk else None\n res = super().save(*args, **kwargs)\n if old_instance and self.status != old_instance.status:\n # status changed, send info\n print('sending task status update')\n async_to_sync(get_channel_layer().group_send)(\n 'admin_group',\n {\n 'type': 'task_status_changed',\n 'message': DramatiqTaskSerializer(self).data,\n }\n )\n return res" }, { "identifier": "BlockchainSerializer", "path": "backend/api/serializers.py", "snippet": "class BlockchainSerializer(serializers.ModelSerializer):\n node = serializers.PrimaryKeyRelatedField(queryset=Node.objects.all(), write_only=True)\n\n class Meta:\n model = Blockchain\n fields = ('name', 'slug', 'default', 'node', 'load_progress', 'fetch_price')" }, { "identifier": "BlockchainExtendedSerializer", "path": "backend/api/serializers.py", "snippet": "class BlockchainExtendedSerializer(serializers.ModelSerializer):\n tasks = serializers.SerializerMethodField()\n\n class Meta:\n model = Blockchain\n fields = ('name', 'slug', 'node', 'default', 'load_progress', 'fetch_price', 'tasks')\n\n def to_representation(self, obj):\n self.fields['node'] = NodeSerializer()\n return super().to_representation(obj)\n\n def get_tasks(self, blockchain):\n content_type = ContentType.objects.get_for_model(blockchain)\n tasks = DramatiqTask.objects.filter(\n content_type=content_type,\n object_id=blockchain.id,\n )\n return DramatiqTaskSimpleSerializer(tasks, many=True).data" }, { "identifier": "BlockSerializer", "path": "backend/api/serializers.py", "snippet": "class BlockSerializer(serializers.ModelSerializer):\n blockchain = BlockchainSerializer()\n header = BlockHeaderSerializer()\n 
starting_reorg_blocks = serializers.SerializerMethodField()\n\n class Meta:\n model = Block\n fields = (\n 'hash',\n 'height',\n 'timestamp',\n 'header',\n 'prev_hash',\n 'reorg',\n 'nr_kernels',\n 'nr_inputs',\n 'nr_outputs',\n 'blockchain',\n 'starting_reorg_blocks',\n )\n\n def get_starting_reorg_blocks(self, block):\n reorgs = Reorg.objects.filter(start_main_block=block)\n reorgs = list(filter(\n lambda reorg: reorg.end_reorg_block.height - \\\n reorg.start_reorg_block.height + 1 >= settings.MIN_REORG_LEN,\n reorgs\n ))\n return BlockSerializer(\n [reorg.start_reorg_block for reorg in reorgs], many=True).data" }, { "identifier": "BlockDetailSerializer", "path": "backend/api/serializers.py", "snippet": "class BlockDetailSerializer(serializers.ModelSerializer):\n header = BlockHeaderSerializer()\n kernels = KernelSerializer(many=True)\n inputs = InputSerializer(many=True)\n outputs = OutputSerializer(many=True)\n blockchain = BlockchainSerializer()\n confirmations = serializers.SerializerMethodField()\n next_hash = serializers.SerializerMethodField()\n next_block_reorgs = serializers.SerializerMethodField()\n\n class Meta:\n model = Block\n fields = (\n 'hash',\n 'height',\n 'timestamp',\n 'header',\n 'prev_hash',\n 'kernels',\n 'inputs',\n 'outputs',\n 'blockchain',\n 'confirmations',\n 'next_hash',\n 'reorg',\n 'next_block_reorgs',\n )\n\n def get_confirmations(self, block):\n # in reorged blocks we show confirmations based on the reorged chain!\n tip_height = block.blockchain.blocks\\\n .filter(reorg=block.reorg)\\\n .order_by('-height')\\\n .first().height\n return tip_height - block.height + 1\n\n def get_next_hash(self, block):\n try:\n return Block.objects.get(\n blockchain=block.blockchain,\n reorg=block.reorg,\n prev_hash=block.hash\n ).hash\n except Block.DoesNotExist:\n return None\n\n def get_next_block_reorgs(self, block):\n from .serializers import ReorgSerializer\n reorgs = Reorg.objects.filter(start_main_block__prev_hash=block.hash)\n reorgs = list(filter(\n lambda reorg: reorg.end_reorg_block.height - \\\n reorg.start_reorg_block.height + 1 >= settings.MIN_REORG_LEN,\n reorgs\n ))\n return ReorgSerializer(reorgs, many=True).data" }, { "identifier": "NodeSerializer", "path": "backend/api/serializers.py", "snippet": "class NodeSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Node\n fields = '__all__'" }, { "identifier": "NodeGroupSerializer", "path": "backend/api/serializers.py", "snippet": "class NodeGroupSerializer(serializers.ModelSerializer):\n nodes = NodeSerializer(many=True, read_only=True)\n\n class Meta:\n model = NodeGroup\n fields = '__all__'" }, { "identifier": "DramatiqTaskSerializer", "path": "backend/api/serializers.py", "snippet": "class DramatiqTaskSerializer(serializers.ModelSerializer):\n content_object = serializers.SerializerMethodField()\n\n class Meta:\n model = DramatiqTask\n fields = (\n 'id',\n 'message_id',\n 'type',\n 'status',\n 'failure_reason',\n 'content_object',\n )\n\n def get_content_object(self, task):\n from .serializers import BlockchainSerializer\n serializer_mapper = {\n 'Blockchain': BlockchainSerializer,\n }\n klass = task.content_object.__class__\n return {\n 'model': klass._meta.model_name,\n 'data': serializer_mapper[klass.__name__](task.content_object).data,\n }" }, { "identifier": "bootstrap_blockchain", "path": "backend/api/tasks.py", "snippet": "@dramatiq.actor(max_retries=0, time_limit=float(\"inf\"))\ndef bootstrap_blockchain(blockchain_slug):\n # import here to avoid cyclic import\n from .models import 
Blockchain\n Blockchain.objects.get(slug=blockchain_slug).bootstrap()" }, { "identifier": "delete_blockchain", "path": "backend/api/tasks.py", "snippet": "@dramatiq.actor(max_retries=0, time_limit=float(\"inf\"))\ndef delete_blockchain(blockchain_slug):\n # import here to avoid cyclic import\n from .models import Blockchain\n Blockchain.objects.get(slug=blockchain_slug).delete()\n async_to_sync(get_channel_layer().group_send)(\n 'admin_group',\n {\n 'type': 'blockchain_deleted',\n 'message': {\n 'slug': blockchain_slug,\n },\n }\n )" } ]
from asgiref.sync import async_to_sync from django.contrib.contenttypes.models import ContentType from django.db.models.deletion import ProtectedError from django.views.generic import TemplateView from django.views.decorators.cache import never_cache from dramatiq_abort import abort from rest_framework import status from rest_framework.exceptions import APIException from rest_framework.exceptions import NotFound from rest_framework.exceptions import ValidationError as DRFValidationError from rest_framework.decorators import action from rest_framework.permissions import IsAuthenticated from rest_framework.response import Response from slugify import slugify from .bootstrap import fetch_and_store_block, update_blockchain_progress from .exceptions import UpdateBlockchainProgressError from .helpers import get_filter_backends, load_data_from_redis from .filters import ( BlockFilter, CustomBlockSearchFilter, NodeFilter, NodeGroupFilter, ) from .mixins import CustomModelViewSet from .models import Blockchain, Block, Reorg, Node, NodeGroup, DramatiqTask from .serializers import ( BlockchainSerializer, BlockchainExtendedSerializer, BlockSerializer, BlockDetailSerializer, NodeSerializer, NodeGroupSerializer, DramatiqTaskSerializer, ) from .tasks import bootstrap_blockchain, delete_blockchain import channels import logging import pytz
11,114
# happens but websocket calls for first blocks fails while for later it # doesn't and then the code bellow wouldn't spot a reorg) block_at_this_height = blockchain.blocks\ .filter(height=height, reorg__isnull=True)\ .first() # we fetch here because anyone can call this view - we don't want to # work with fake data new_block = fetch_and_store_block(blockchain, height, prefetch=False) if block_at_this_height: if block_at_this_height.hash == new_block.hash: # probably have fetched this block while bootstraping, accepted # view got called a bit later so we already have it, noop return Response(status=status.HTTP_200_OK) logger.info( 'Block accepted - reorg spotted', extra={ 'block_at_this_height': block_at_this_height, 'block_at_this_height.hash': block_at_this_height.hash, 'block_at_this_height.reorg': block_at_this_height.reorg, 'hash': new_block.hash }, ) # reorg spotted reorged_blocks = list(blockchain.blocks\ .filter(height__gte=height, reorg__isnull=True) .exclude(pk=new_block.pk) .order_by('height')) logger.info('reorged_blocks at start: {}'.format(reorged_blocks)) # these reorged blocks are guaranteed to be reorged, now find any # previous blocks which were also reorged - aka get common # ancestor of the reorged block at 'height' and the new (main) block # find the common ancestor of this block and the reorged block at # the same height. We start with the current height to avoid more # logic for Reorg instance params if new_block.hash == block_at_this_height.hash: # at height X we got H1, then we got H2 (this call), but now it # reorged back to H1, so we don't do anything, no reorg is # stored since we didn't fetch the block in time from the node logger.info('Reorg cancelled out, noop') return Response(status=status.HTTP_200_OK) logger.info('new_block', extra={'hash': new_block.hash, 'prev_hash': new_block.prev_hash}) prev_block_new_chain = new_block prev_block_old_chain = reorged_blocks[0] logger.info('prev_block_new_chain: {}, prev_block_old_chain: {}'.format(prev_block_new_chain, prev_block_old_chain)) # remove the first one since it will get added again reorged_blocks = reorged_blocks[1:] logger.info('reorged_blocks after [1:]: {}'.format(reorged_blocks)) main_blocks = [] while True: # theoretically we might be missing the block in db but we don't # cover such cases currently if not prev_block_new_chain: logger.info('reached break in IF NOT prev_block_new_chain') # this means that prev_block_old_chain is also None, since # they're both "previous" of their genesis block break if prev_block_new_chain == prev_block_old_chain: logger.info('reached break in IF NOT prev_block_new_chain == prev_block_old_chain') # found the common ancestor break # add to the left because we want to keep it sorted by height reorged_blocks.insert(0, prev_block_old_chain) main_blocks.insert(0, prev_block_new_chain) logger.info('new reorged_blocks: {}'.format(reorged_blocks)) logger.info('new main_blocks: {}'.format(main_blocks)) prev_block_new_chain = prev_block_new_chain.get_previous_block() prev_block_old_chain = prev_block_old_chain.get_previous_block() logger.info('new prev_block_new_chain: {}, prev_block_old_chain: {}'.format(prev_block_new_chain, prev_block_old_chain)) logger.info('before reorg create: reorged_blocks: {}, main_blocks: {}'.format(reorged_blocks, main_blocks)) reorg = Reorg.objects.create( blockchain=blockchain, start_reorg_block=reorged_blocks[0], end_reorg_block=reorged_blocks[-1], start_main_block=main_blocks[0], ) # Reorg post_save signal fixes .reorg on new/old blocks and fixes # 
inputs/outputs web_socket_msg_type = 'reorged' web_socket_msg = BlockSerializer(new_block).data if web_socket_msg_type == 'reorged': web_socket_msg = blockchain.slug # TODO: check if channels-redis 4.x is fixed: https://github.com/django/channels_redis/issues/332 channel_layer = channels.layers.get_channel_layer() async_to_sync(channel_layer.group_send)( 'default_group', { 'type': web_socket_msg_type, 'message': web_socket_msg, } ) # update the loading progress since it could be skewed due to the # periodic task updating it before this view has been called try: update_blockchain_progress(blockchain) except UpdateBlockchainProgressError: # ignore it, let it update itself the next time pass return Response(status=status.HTTP_200_OK) def get_permissions(self): """ Add, delete and update require authentication, others don't. """ # accepted view can currently be called by anyone, we ignore its data though # and fetch it from our node. Maybe in the future node could send some # header to prevent potential spam permission_classes = [] if self.action not in ['list', 'retrieve', 'accepted', 'graphs']: permission_classes = [IsAuthenticated] return [permission() for permission in permission_classes] class BlockViewSet(CustomModelViewSet): """API endpoint for Block. This ViewSet is nested in BlockchainViewSet.""" queryset = Block.objects\ .order_by('-height')\ .all() filter_backends = get_filter_backends({
logger = logging.getLogger(__name__) # Serve Vue Application index_view = never_cache(TemplateView.as_view(template_name='index.html')) class NodeGroupViewSet(CustomModelViewSet): """API endpoint for NodeGroup.""" queryset = NodeGroup.objects.all() filterset_class = NodeGroupFilter serializer_class = NodeGroupSerializer lookup_field = 'slug' permission_classes = [IsAuthenticated] def create(self, request, *args, **kwargs): slug = request.data.get('slug') if not slug: request.data['slug'] = slugify(request.data['name'], to_lower=True) return super().create(request, *args, **kwargs) def destroy(self, request, *args, **kwargs): try: return super().destroy(request, *args, **kwargs) except ProtectedError as e: raise DRFValidationError( detail='Node group is related to nodes, delete them first') class NodeViewSet(CustomModelViewSet): """API endpoint for Node.""" queryset = Node.objects.all() filterset_class = NodeFilter serializer_class = NodeSerializer # currently all node views require authentication permission_classes = [IsAuthenticated] lookup_field = 'slug' def create(self, request, *args, **kwargs): slug = request.data.get('slug') if not slug: request.data['slug'] = slugify(request.data['name'], to_lower=True) request.data['group'] = NodeGroup.objects.get(slug=request.data['group']).pk return super().create(request, *args, **kwargs) def update(self, request, *args, **kwargs): # NOTE: super().partial_update calls update(..., partial=True) if not kwargs.get('partial'): # we don't allow full updates - aka PUT raise DRFPermissionDenied() return super().update(request, *args, **kwargs) def partial_update(self, request, slug=None): request.data['group'] = NodeGroup.objects.get(slug=request.data['group']).pk return super().partial_update(request, slug=slug) @action(detail=True, methods=['get']) def reachable(self, request, slug=None): node = self.get_object() try: res = node.is_reachable() except Exception as e: logger.exception('Unreachable node') res = False return Response(res, status=status.HTTP_200_OK) def destroy(self, request, *args, **kwargs): try: return super().destroy(request, *args, **kwargs) except ProtectedError as e: raise DRFValidationError( detail='Node is related to blockchains, delete them first') class BlockchainViewSet(CustomModelViewSet): """API endpoint for Blockchain.""" queryset = Blockchain.objects.all() serializer_class = BlockchainSerializer lookup_field = 'slug' def get_serializer_class(self): # when authenticated we return also NodeSerializer data if self.request.user.is_authenticated: return BlockchainExtendedSerializer return BlockchainSerializer def create(self, request, *args, **kwargs): slug = request.data.get('slug') if not slug: request.data['slug'] = slugify(request.data['name'], to_lower=True) request.data['node'] = request.data['node'] return super().create(request, *args, **kwargs) def destroy(self, request, slug=None): instance = self.get_object() message = delete_blockchain.send(instance.slug) task = DramatiqTask.objects.create( type=DramatiqTask.Type.BLOCKCHAIN_DELETE, status=DramatiqTask.Status.IN_PROGRESS, message_id=message.message_id, content_object=instance, ) return Response( DramatiqTaskSerializer(task).data, status=status.HTTP_200_OK) def _abort_previous_tasks(self, blockchain): conflicting_message_ids = DramatiqTask.objects.filter( status=DramatiqTask.Status.IN_PROGRESS, object_id=blockchain.id, content_type=ContentType.objects.get_for_model(blockchain) ).values_list('message_id', flat=True) # abort previous conflicting tasks if they exist for 
conflicting_message_id in conflicting_message_ids: abort(conflicting_message_id) @action(detail=True, methods=['post']) def bootstrap(self, request, slug=None): blockchain = self.get_object() if not blockchain.node.is_reachable: raise APIException(detail='Node is unreachable') self._abort_previous_tasks(blockchain) # create a new task message = bootstrap_blockchain.send(blockchain.slug) task = DramatiqTask.objects.create( type=DramatiqTask.Type.BOOTSTRAP, status=DramatiqTask.Status.IN_PROGRESS, message_id=message.message_id, content_object=blockchain, ) return Response( DramatiqTaskSerializer(task).data, status=status.HTTP_200_OK) @action( detail=True, methods=['post'], url_path='bootstrap/abort', url_name='bootstrap-abort', ) def abort_bootstrap(self, request, slug=None): blockchain = self.get_object() self._abort_previous_tasks(blockchain) return Response(status=status.HTTP_200_OK) @action(detail=True, methods=['get']) def graphs(self, request, slug=None): """Returns data for all graphs.""" data = { 'transaction_graph': load_data_from_redis(f'tx_graph__{slug}'), } return Response(data=data, status=status.HTTP_200_OK) @action(detail=True, methods=['post']) def accepted(self, request, slug=None): # NOTE: if node is offline and then you start it again then it will # call this view for each block it will get. In this case there will be # many fast sequential calls to this view, there might be too many # postgres connections opened so view executions might actually fail. # The suggested solution is to comment out 'block_accepted_url' in # node's config file, run the node, wait for it to sync, uncomment # 'block_accepted_url' and then manually bootstrap it. blockchain = self.get_object() # check if new block has been receiver when this blockchain is in the # process of being deleted. deleting = DramatiqTask.objects.filter( type=DramatiqTask.Type.BLOCKCHAIN_DELETE, object_id=blockchain.id, content_type=ContentType.objects.get_for_model(blockchain) ).exists() if deleting: # nothing to do, ignore the new block return Response(status=status.HTTP_404_NOT_FOUND) # get request data height = request.data['data']['header']['height'] hash = request.data['hash'] # prev_hash comes as list of int bytes, so we convert it to hex # NOTE: the same is true for some other data which we currently don't # need so we don't transform it, eg. data.header.kernel_root prev_hash = None if request.data['data']['header']['prev_hash']: prev_hash = bytes(request.data['data']['header']['prev_hash']).hex() logger.info( 'Block accepted', extra={ 'height': height, 'hash': hash, 'prev_hash': prev_hash, 'blockchain': blockchain.slug, }, ) web_socket_msg_type = 'send_block' # handle reorg case # we expect blocks to come ordered by height, there are some edge cases # here which are not handled, but they're unlikely to happen (eg. 
reorg # happens but websocket calls for first blocks fails while for later it # doesn't and then the code bellow wouldn't spot a reorg) block_at_this_height = blockchain.blocks\ .filter(height=height, reorg__isnull=True)\ .first() # we fetch here because anyone can call this view - we don't want to # work with fake data new_block = fetch_and_store_block(blockchain, height, prefetch=False) if block_at_this_height: if block_at_this_height.hash == new_block.hash: # probably have fetched this block while bootstraping, accepted # view got called a bit later so we already have it, noop return Response(status=status.HTTP_200_OK) logger.info( 'Block accepted - reorg spotted', extra={ 'block_at_this_height': block_at_this_height, 'block_at_this_height.hash': block_at_this_height.hash, 'block_at_this_height.reorg': block_at_this_height.reorg, 'hash': new_block.hash }, ) # reorg spotted reorged_blocks = list(blockchain.blocks\ .filter(height__gte=height, reorg__isnull=True) .exclude(pk=new_block.pk) .order_by('height')) logger.info('reorged_blocks at start: {}'.format(reorged_blocks)) # these reorged blocks are guaranteed to be reorged, now find any # previous blocks which were also reorged - aka get common # ancestor of the reorged block at 'height' and the new (main) block # find the common ancestor of this block and the reorged block at # the same height. We start with the current height to avoid more # logic for Reorg instance params if new_block.hash == block_at_this_height.hash: # at height X we got H1, then we got H2 (this call), but now it # reorged back to H1, so we don't do anything, no reorg is # stored since we didn't fetch the block in time from the node logger.info('Reorg cancelled out, noop') return Response(status=status.HTTP_200_OK) logger.info('new_block', extra={'hash': new_block.hash, 'prev_hash': new_block.prev_hash}) prev_block_new_chain = new_block prev_block_old_chain = reorged_blocks[0] logger.info('prev_block_new_chain: {}, prev_block_old_chain: {}'.format(prev_block_new_chain, prev_block_old_chain)) # remove the first one since it will get added again reorged_blocks = reorged_blocks[1:] logger.info('reorged_blocks after [1:]: {}'.format(reorged_blocks)) main_blocks = [] while True: # theoretically we might be missing the block in db but we don't # cover such cases currently if not prev_block_new_chain: logger.info('reached break in IF NOT prev_block_new_chain') # this means that prev_block_old_chain is also None, since # they're both "previous" of their genesis block break if prev_block_new_chain == prev_block_old_chain: logger.info('reached break in IF NOT prev_block_new_chain == prev_block_old_chain') # found the common ancestor break # add to the left because we want to keep it sorted by height reorged_blocks.insert(0, prev_block_old_chain) main_blocks.insert(0, prev_block_new_chain) logger.info('new reorged_blocks: {}'.format(reorged_blocks)) logger.info('new main_blocks: {}'.format(main_blocks)) prev_block_new_chain = prev_block_new_chain.get_previous_block() prev_block_old_chain = prev_block_old_chain.get_previous_block() logger.info('new prev_block_new_chain: {}, prev_block_old_chain: {}'.format(prev_block_new_chain, prev_block_old_chain)) logger.info('before reorg create: reorged_blocks: {}, main_blocks: {}'.format(reorged_blocks, main_blocks)) reorg = Reorg.objects.create( blockchain=blockchain, start_reorg_block=reorged_blocks[0], end_reorg_block=reorged_blocks[-1], start_main_block=main_blocks[0], ) # Reorg post_save signal fixes .reorg on new/old blocks and 
fixes # inputs/outputs web_socket_msg_type = 'reorged' web_socket_msg = BlockSerializer(new_block).data if web_socket_msg_type == 'reorged': web_socket_msg = blockchain.slug # TODO: check if channels-redis 4.x is fixed: https://github.com/django/channels_redis/issues/332 channel_layer = channels.layers.get_channel_layer() async_to_sync(channel_layer.group_send)( 'default_group', { 'type': web_socket_msg_type, 'message': web_socket_msg, } ) # update the loading progress since it could be skewed due to the # periodic task updating it before this view has been called try: update_blockchain_progress(blockchain) except UpdateBlockchainProgressError: # ignore it, let it update itself the next time pass return Response(status=status.HTTP_200_OK) def get_permissions(self): """ Add, delete and update require authentication, others don't. """ # accepted view can currently be called by anyone, we ignore its data though # and fetch it from our node. Maybe in the future node could send some # header to prevent potential spam permission_classes = [] if self.action not in ['list', 'retrieve', 'accepted', 'graphs']: permission_classes = [IsAuthenticated] return [permission() for permission in permission_classes] class BlockViewSet(CustomModelViewSet): """API endpoint for Block. This ViewSet is nested in BlockchainViewSet.""" queryset = Block.objects\ .order_by('-height')\ .all() filter_backends = get_filter_backends({
'SearchFilter': CustomBlockSearchFilter,
6
2023-12-24 22:15:11+00:00
16k
lchen1019/Image_Cropper
ISAT/widgets/mainwindow.py
[ { "identifier": "Ui_MainWindow", "path": "ISAT/ui/MainWindow.py", "snippet": "class Ui_MainWindow(object):\n def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(1280, 764)\n MainWindow.setMinimumSize(QtCore.QSize(800, 600))\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(12)\n MainWindow.setFont(font)\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(\":/icons/icons/isat_bg_50x25.png\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n MainWindow.setWindowIcon(icon)\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)\n self.verticalLayout.setContentsMargins(0, 0, 0, 0)\n self.verticalLayout.setSpacing(0)\n self.verticalLayout.setObjectName(\"verticalLayout\")\n MainWindow.setCentralWidget(self.centralwidget)\n self.menubar = QtWidgets.QMenuBar(MainWindow)\n self.menubar.setEnabled(True)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 1280, 24))\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(12)\n self.menubar.setFont(font)\n self.menubar.setAutoFillBackground(False)\n self.menubar.setDefaultUp(False)\n self.menubar.setNativeMenuBar(True)\n self.menubar.setObjectName(\"menubar\")\n self.menuFile = QtWidgets.QMenu(self.menubar)\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(12)\n self.menuFile.setFont(font)\n self.menuFile.setObjectName(\"menuFile\")\n self.menuView = QtWidgets.QMenu(self.menubar)\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(12)\n self.menuView.setFont(font)\n self.menuView.setObjectName(\"menuView\")\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(12)\n icon1 = QtGui.QIcon()\n icon1.addPixmap(QtGui.QPixmap(\":/icon/icons/翻译_translate.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(12)\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(12)\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(12)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n MainWindow.setMenuBar(self.menubar)\n\n self.menuTools = QtWidgets.QMenu(self.menubar)\n self.menuTools.setEnabled(True)\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(12)\n self.menuTools.setFont(font)\n self.menuTools.setObjectName(\"menuTools\")\n\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setLayoutDirection(QtCore.Qt.LeftToRight)\n self.statusbar.setObjectName(\"statusbar\")\n MainWindow.setStatusBar(self.statusbar)\n self.toolBar = QtWidgets.QToolBar(MainWindow)\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(12)\n self.toolBar.setFont(font)\n self.toolBar.setIconSize(QtCore.QSize(24, 24))\n self.toolBar.setToolButtonStyle(QtCore.Qt.ToolButtonIconOnly)\n self.toolBar.setFloatable(False)\n self.toolBar.setObjectName(\"toolBar\")\n MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar)\n self.dockWidgetContents_2 = QtWidgets.QWidget()\n self.dockWidgetContents_2.setObjectName(\"dockWidgetContents_2\")\n self.dockWidgetContents_3 = QtWidgets.QWidget()\n self.dockWidgetContents_3.setObjectName(\"dockWidgetContents_3\")\n self.files_dock = QtWidgets.QDockWidget(MainWindow)\n self.files_dock.setObjectName(\"files_dock\")\n self.dockWidgetContents = 
QtWidgets.QWidget()\n self.dockWidgetContents.setObjectName(\"dockWidgetContents\")\n self.files_dock.setWidget(self.dockWidgetContents)\n MainWindow.addDockWidget(QtCore.Qt.DockWidgetArea(2), self.files_dock)\n self.dockWidgetContents_4 = QtWidgets.QWidget()\n self.dockWidgetContents_4.setObjectName(\"dockWidgetContents_4\")\n self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.dockWidgetContents_4)\n self.verticalLayout_2.setObjectName(\"verticalLayout_2\")\n self.actionOpen_dir = QtWidgets.QAction(MainWindow)\n icon2 = QtGui.QIcon()\n icon2.addPixmap(QtGui.QPixmap(\":/icon/icons/照片_pic.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionOpen_dir.setIcon(icon2)\n self.actionOpen_dir.setObjectName(\"actionOpen_dir\")\n self.actionZoom_in = QtWidgets.QAction(MainWindow)\n icon3 = QtGui.QIcon()\n icon3.addPixmap(QtGui.QPixmap(\":/icon/icons/放大_zoom-in.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionZoom_in.setIcon(icon3)\n self.actionZoom_in.setObjectName(\"actionZoom_in\")\n self.actionZoom_out = QtWidgets.QAction(MainWindow)\n icon4 = QtGui.QIcon()\n icon4.addPixmap(QtGui.QPixmap(\":/icon/icons/缩小_zoom-out.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionZoom_out.setIcon(icon4)\n self.actionZoom_out.setObjectName(\"actionZoom_out\")\n self.actionFit_wiondow = QtWidgets.QAction(MainWindow)\n icon5 = QtGui.QIcon()\n icon5.addPixmap(QtGui.QPixmap(\":/icon/icons/全宽_fullwidth.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionFit_wiondow.setIcon(icon5)\n self.actionFit_wiondow.setObjectName(\"actionFit_wiondow\")\n self.actionSetting = QtWidgets.QAction(MainWindow)\n icon6 = QtGui.QIcon()\n icon6.addPixmap(QtGui.QPixmap(\":/icon/icons/设置_setting-two.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionSetting.setIcon(icon6)\n self.actionSetting.setObjectName(\"actionSetting\")\n self.actionExit = QtWidgets.QAction(MainWindow)\n icon7 = QtGui.QIcon()\n icon7.addPixmap(QtGui.QPixmap(\":/icon/icons/开关_power.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionExit.setIcon(icon7)\n self.actionExit.setObjectName(\"actionExit\")\n self.actionSave_dir = QtWidgets.QAction(MainWindow)\n icon8 = QtGui.QIcon()\n icon8.addPixmap(QtGui.QPixmap(\":/icon/icons/文件夹-开_folder-open.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionSave_dir.setIcon(icon8)\n self.actionSave_dir.setObjectName(\"actionSave_dir\")\n self.actionSave = QtWidgets.QAction(MainWindow)\n icon9 = QtGui.QIcon()\n icon9.addPixmap(QtGui.QPixmap(\":/icon/icons/保存_save.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionSave.setIcon(icon9)\n self.actionSave.setObjectName(\"actionSave\")\n self.actionPrev = QtWidgets.QAction(MainWindow)\n self.actionPrev.setCheckable(False)\n icon10 = QtGui.QIcon()\n icon10.addPixmap(QtGui.QPixmap(\":/icon/icons/上一步_back.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionPrev.setIcon(icon10)\n self.actionPrev.setMenuRole(QtWidgets.QAction.TextHeuristicRole)\n self.actionPrev.setPriority(QtWidgets.QAction.NormalPriority)\n self.actionPrev.setObjectName(\"actionPrev\")\n self.actionNext = QtWidgets.QAction(MainWindow)\n icon11 = QtGui.QIcon()\n icon11.addPixmap(QtGui.QPixmap(\":/icon/icons/下一步_next.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionNext.setIcon(icon11)\n self.actionNext.setObjectName(\"actionNext\")\n self.actionShortcut = QtWidgets.QAction(MainWindow)\n icon12 = QtGui.QIcon()\n icon12.addPixmap(QtGui.QPixmap(\":/icon/icons/键盘_keyboard-one.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionShortcut.setIcon(icon12)\n 
self.actionShortcut.setObjectName(\"actionShortcut\")\n self.actionAbout = QtWidgets.QAction(MainWindow)\n icon13 = QtGui.QIcon()\n icon13.addPixmap(QtGui.QPixmap(\":/icon/icons/我的_me.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionAbout.setIcon(icon13)\n self.actionAbout.setObjectName(\"actionAbout\")\n self.actionDelete = QtWidgets.QAction(MainWindow)\n self.actionDelete.setEnabled(False)\n icon15 = QtGui.QIcon()\n icon15.addPixmap(QtGui.QPixmap(\":/icon/icons/删除_delete.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionDelete.setIcon(icon15)\n self.actionDelete.setObjectName(\"actionDelete\")\n self.actionBit_map = QtWidgets.QAction(MainWindow)\n self.actionBit_map.setCheckable(False)\n self.actionBit_map.setIcon(icon2)\n self.actionBit_map.setObjectName(\"actionBit_map\")\n self.actionEdit = QtWidgets.QAction(MainWindow)\n self.actionEdit.setEnabled(False)\n icon16 = QtGui.QIcon()\n icon16.addPixmap(QtGui.QPixmap(\":/icon/icons/编辑_edit.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionEdit.setIcon(icon16)\n self.actionEdit.setObjectName(\"actionEdit\")\n self.actionTo_top = QtWidgets.QAction(MainWindow)\n self.actionTo_top.setEnabled(False)\n icon17 = QtGui.QIcon()\n icon17.addPixmap(QtGui.QPixmap(\":/icon/icons/去顶部_to-top.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionTo_top.setIcon(icon17)\n self.actionTo_top.setObjectName(\"actionTo_top\")\n self.actionTo_bottom = QtWidgets.QAction(MainWindow)\n self.actionTo_bottom.setEnabled(False)\n icon18 = QtGui.QIcon()\n icon18.addPixmap(QtGui.QPixmap(\":/icon/icons/去底部_to-bottom.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionTo_bottom.setIcon(icon18)\n self.actionTo_bottom.setObjectName(\"actionTo_bottom\")\n self.actionChinese = QtWidgets.QAction(MainWindow)\n self.actionChinese.setCheckable(True)\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(12)\n self.actionChinese.setFont(font)\n self.actionChinese.setObjectName(\"actionChinese\")\n self.actionEnglish = QtWidgets.QAction(MainWindow)\n self.actionEnglish.setCheckable(True)\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(12)\n self.actionEnglish.setFont(font)\n self.actionEnglish.setObjectName(\"actionEnglish\")\n self.actionBackspace = QtWidgets.QAction(MainWindow)\n icon19 = QtGui.QIcon()\n icon19.addPixmap(QtGui.QPixmap(\":/icon/icons/删除_delete-two.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionBackspace.setIcon(icon19)\n self.actionBackspace.setObjectName(\"actionBackspace\")\n self.actionCancel = QtWidgets.QAction(MainWindow)\n icon20 = QtGui.QIcon()\n icon20.addPixmap(QtGui.QPixmap(\":/icon/icons/关闭_close-one.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionCancel.setIcon(icon20)\n self.actionCancel.setObjectName(\"actionCancel\")\n self.actionFinish = QtWidgets.QAction(MainWindow)\n icon21 = QtGui.QIcon()\n icon21.addPixmap(QtGui.QPixmap(\":/icon/icons/校验_check-one.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionFinish.setIcon(icon21)\n self.actionFinish.setObjectName(\"actionFinish\")\n self.actionPolygon = QtWidgets.QAction(MainWindow)\n icon22 = QtGui.QIcon()\n icon22.addPixmap(QtGui.QPixmap(\":/icon/icons/锚点_anchor.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionPolygon.setIcon(icon22)\n self.actionPolygon.setObjectName(\"actionPolygon\")\n self.actionVisible = QtWidgets.QAction(MainWindow)\n icon23 = QtGui.QIcon()\n icon23.addPixmap(QtGui.QPixmap(\":/icon/icons/眼睛_eyes.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n 
self.actionVisible.setIcon(icon23)\n self.actionVisible.setObjectName(\"actionVisible\")\n self.actionContour_Max_only = QtWidgets.QAction(MainWindow)\n self.actionContour_Max_only.setCheckable(True)\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(12)\n self.actionContour_Max_only.setFont(font)\n self.actionContour_Max_only.setObjectName(\"actionContour_Max_only\")\n self.actionContour_External = QtWidgets.QAction(MainWindow)\n self.actionContour_External.setCheckable(True)\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(12)\n self.actionContour_External.setFont(font)\n self.actionContour_External.setObjectName(\"actionContour_External\")\n self.actionContour_All = QtWidgets.QAction(MainWindow)\n self.actionContour_All.setCheckable(True)\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(12)\n self.actionContour_All.setFont(font)\n self.actionContour_All.setObjectName(\"actionContour_All\")\n self.actionModel_manage = QtWidgets.QAction(MainWindow)\n icon24 = QtGui.QIcon()\n icon24.addPixmap(QtGui.QPixmap(\":/icon/icons/列表_list-middle.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionModel_manage.setIcon(icon24)\n font = QtGui.QFont()\n font.setFamily(\"Times New Roman\")\n font.setPointSize(12)\n self.actionModel_manage.setFont(font)\n self.actionModel_manage.setObjectName(\"actionModel_manage\")\n self.actionConverter = QtWidgets.QAction(MainWindow)\n icon25 = QtGui.QIcon()\n icon25.addPixmap(QtGui.QPixmap(\":/icon/icons/转换文件夹1_folder-conversion-one.svg\"), QtGui.QIcon.Normal, QtGui.QIcon.Off)\n self.actionConverter.setIcon(icon25)\n self.actionConverter.setObjectName(\"actionConverter\")\n self.menuFile.addAction(self.actionOpen_dir)\n self.menuFile.addAction(self.actionSave_dir)\n self.menuFile.addSeparator()\n self.menuFile.addAction(self.actionPrev)\n self.menuFile.addAction(self.actionNext)\n self.menuFile.addSeparator()\n self.menuFile.addAction(self.actionSetting)\n self.menuFile.addAction(self.actionExit)\n self.menuView.addSeparator()\n self.menuView.addAction(self.actionZoom_in)\n self.menuView.addAction(self.actionZoom_out)\n self.menuView.addAction(self.actionFit_wiondow)\n self.menuView.addSeparator()\n self.menuView.addAction(self.actionBit_map)\n self.menuView.addSeparator()\n self.menuTools.addSeparator()\n self.menuTools.addAction(self.actionConverter)\n self.menubar.addAction(self.menuFile.menuAction())\n self.menubar.addAction(self.menuView.menuAction())\n self.menubar.addAction(self.menuTools.menuAction())\n\n self.toolBar.addAction(self.actionPrev)\n self.toolBar.addAction(self.actionNext)\n self.toolBar.addSeparator()\n self.toolBar.addAction(self.actionPolygon)\n self.toolBar.addAction(self.actionFinish)\n self.toolBar.addAction(self.actionCancel)\n self.toolBar.addAction(self.actionSave)\n self.toolBar.addAction(self.actionDelete)\n self.toolBar.addSeparator()\n self.toolBar.addAction(self.actionZoom_in)\n self.toolBar.addAction(self.actionZoom_out)\n\n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"ISAT\"))\n self.menuFile.setTitle(_translate(\"MainWindow\", \"File\"))\n self.menuView.setTitle(_translate(\"MainWindow\", \"View\"))\n self.menuTools.setTitle(_translate(\"MainWindow\", \"Tools\"))\n self.toolBar.setWindowTitle(_translate(\"MainWindow\", \"toolBar\"))\n 
self.files_dock.setWindowTitle(_translate(\"MainWindow\", \"Files\"))\n self.actionOpen_dir.setText(_translate(\"MainWindow\", \"Images dir\"))\n self.actionOpen_dir.setStatusTip(_translate(\"MainWindow\", \"Open images dir.\"))\n self.actionZoom_in.setText(_translate(\"MainWindow\", \"Zoom in\"))\n self.actionZoom_in.setStatusTip(_translate(\"MainWindow\", \"Zoom in.\"))\n self.actionZoom_out.setText(_translate(\"MainWindow\", \"Zoom out\"))\n self.actionZoom_out.setStatusTip(_translate(\"MainWindow\", \"Zoom out.\"))\n self.actionFit_wiondow.setText(_translate(\"MainWindow\", \"Fit window\"))\n self.actionFit_wiondow.setToolTip(_translate(\"MainWindow\", \"Fit window\"))\n self.actionFit_wiondow.setStatusTip(_translate(\"MainWindow\", \"Fit window.\"))\n self.actionFit_wiondow.setShortcut(_translate(\"MainWindow\", \"F\"))\n self.actionSetting.setText(_translate(\"MainWindow\", \"Setting\"))\n self.actionSetting.setStatusTip(_translate(\"MainWindow\", \"Setting.\"))\n self.actionExit.setText(_translate(\"MainWindow\", \"Exit\"))\n self.actionExit.setToolTip(_translate(\"MainWindow\", \"Exit\"))\n self.actionExit.setStatusTip(_translate(\"MainWindow\", \"Exit.\"))\n self.actionSave_dir.setText(_translate(\"MainWindow\", \"Label dir\"))\n self.actionSave_dir.setStatusTip(_translate(\"MainWindow\", \"Open label dir.\"))\n self.actionSave.setText(_translate(\"MainWindow\", \"Save\"))\n self.actionSave.setStatusTip(_translate(\"MainWindow\", \"Save annotation.\"))\n self.actionSave.setShortcut(_translate(\"MainWindow\", \"S\"))\n self.actionPrev.setText(_translate(\"MainWindow\", \"Prev image\"))\n self.actionPrev.setToolTip(_translate(\"MainWindow\", \"Prev image\"))\n self.actionPrev.setStatusTip(_translate(\"MainWindow\", \"Prev image.\"))\n self.actionPrev.setShortcut(_translate(\"MainWindow\", \"A\"))\n self.actionNext.setText(_translate(\"MainWindow\", \"Next image\"))\n self.actionNext.setToolTip(_translate(\"MainWindow\", \"Next image\"))\n self.actionNext.setStatusTip(_translate(\"MainWindow\", \"Next image.\"))\n self.actionNext.setShortcut(_translate(\"MainWindow\", \"D\"))\n self.actionShortcut.setText(_translate(\"MainWindow\", \"Shortcut\"))\n self.actionAbout.setText(_translate(\"MainWindow\", \"About\"))\n self.actionDelete.setText(_translate(\"MainWindow\", \"Delete\"))\n self.actionDelete.setToolTip(_translate(\"MainWindow\", \"Delete polygon\"))\n self.actionDelete.setStatusTip(_translate(\"MainWindow\", \"Delete polygon.\"))\n self.actionDelete.setShortcut(_translate(\"MainWindow\", \"Del\"))\n self.actionBit_map.setText(_translate(\"MainWindow\", \"Bit map\"))\n self.actionBit_map.setStatusTip(_translate(\"MainWindow\", \"Show instance or segmeent state.\"))\n self.actionBit_map.setShortcut(_translate(\"MainWindow\", \"Space\"))\n self.actionEdit.setText(_translate(\"MainWindow\", \"Edit\"))\n self.actionEdit.setToolTip(_translate(\"MainWindow\", \"Edit polygon\"))\n self.actionEdit.setStatusTip(_translate(\"MainWindow\", \"Edit polygon attribute.\"))\n self.actionTo_top.setText(_translate(\"MainWindow\", \"To top\"))\n self.actionTo_top.setToolTip(_translate(\"MainWindow\", \"Move polygon to top layer\"))\n self.actionTo_top.setStatusTip(_translate(\"MainWindow\", \"Move polygon to top layer.\"))\n self.actionTo_top.setShortcut(_translate(\"MainWindow\", \"T\"))\n self.actionTo_bottom.setText(_translate(\"MainWindow\", \"To bottom\"))\n self.actionTo_bottom.setToolTip(_translate(\"MainWindow\", \"Move polygon to bottom layer\"))\n 
self.actionTo_bottom.setStatusTip(_translate(\"MainWindow\", \"Move polygon to bottom layer.\"))\n self.actionTo_bottom.setShortcut(_translate(\"MainWindow\", \"B\"))\n self.actionChinese.setText(_translate(\"MainWindow\", \"中文\"))\n self.actionEnglish.setText(_translate(\"MainWindow\", \"English\"))\n self.actionBackspace.setText(_translate(\"MainWindow\", \"Backspace\"))\n self.actionBackspace.setToolTip(_translate(\"MainWindow\", \"Backspace\"))\n self.actionBackspace.setStatusTip(_translate(\"MainWindow\", \"Backspace.\"))\n self.actionBackspace.setShortcut(_translate(\"MainWindow\", \"Z\"))\n self.actionCancel.setText(_translate(\"MainWindow\", \"Cancel\"))\n self.actionCancel.setToolTip(_translate(\"MainWindow\", \"Annotate canceled\"))\n self.actionCancel.setStatusTip(_translate(\"MainWindow\", \"Annotate canceled.\"))\n self.actionCancel.setShortcut(_translate(\"MainWindow\", \"Esc\"))\n self.actionFinish.setText(_translate(\"MainWindow\", \"Finish\"))\n self.actionFinish.setToolTip(_translate(\"MainWindow\", \"Annotate finished\"))\n self.actionFinish.setStatusTip(_translate(\"MainWindow\", \"Annotate finished.\"))\n self.actionFinish.setShortcut(_translate(\"MainWindow\", \"E\"))\n self.actionPolygon.setText(_translate(\"MainWindow\", \"Polygon\"))\n self.actionPolygon.setToolTip(_translate(\"MainWindow\", \"Draw polygon\"))\n self.actionPolygon.setStatusTip(_translate(\"MainWindow\", \"Accurately annotate by drawing polygon. \"))\n self.actionPolygon.setShortcut(_translate(\"MainWindow\", \"Q\"))\n self.actionVisible.setText(_translate(\"MainWindow\", \"Visible\"))\n self.actionVisible.setToolTip(_translate(\"MainWindow\", \"Visible\"))\n self.actionVisible.setStatusTip(_translate(\"MainWindow\", \"Visible.\"))\n self.actionVisible.setShortcut(_translate(\"MainWindow\", \"V\"))\n self.actionContour_Max_only.setText(_translate(\"MainWindow\", \"Max only\"))\n self.actionContour_Max_only.setStatusTip(_translate(\"MainWindow\", \"Max contour save only.\"))\n self.actionContour_Max_only.setWhatsThis(_translate(\"MainWindow\", \"Max contour save only.\"))\n self.actionContour_External.setText(_translate(\"MainWindow\", \"External\"))\n self.actionContour_External.setStatusTip(_translate(\"MainWindow\", \"External contour save only.\"))\n self.actionContour_External.setWhatsThis(_translate(\"MainWindow\", \"External contour save only.\"))\n self.actionContour_All.setText(_translate(\"MainWindow\", \"All\"))\n self.actionContour_All.setStatusTip(_translate(\"MainWindow\", \"All contour save.\"))\n self.actionContour_All.setWhatsThis(_translate(\"MainWindow\", \"All contour save.\"))\n self.actionModel_manage.setText(_translate(\"MainWindow\", \"Model manage\"))\n self.actionModel_manage.setStatusTip(_translate(\"MainWindow\", \"Model manage.\"))\n self.actionModel_manage.setWhatsThis(_translate(\"MainWindow\", \"Model manage.\"))\n self.actionConverter.setText(_translate(\"MainWindow\", \"Converter\"))" }, { "identifier": "FilesDockWidget", "path": "ISAT/widgets/files_dock_widget.py", "snippet": "class FilesDockWidget(QtWidgets.QWidget, Ui_Form):\n def __init__(self, mainwindow):\n super(FilesDockWidget, self).__init__()\n self.setupUi(self)\n self.mainwindow = mainwindow\n self.listWidget.clicked.connect(self.listwidget_doubleclick)\n self.lineEdit_jump.returnPressed.connect(self.mainwindow.jump_to)\n\n def generate_item_and_itemwidget(self, file_name):\n item = QtWidgets.QListWidgetItem()\n item.setSizeHint(QtCore.QSize(200, 30))\n item_widget = QtWidgets.QWidget()\n layout = 
QtWidgets.QHBoxLayout()\n layout.setContentsMargins(9, 1, 9, 1)\n\n state_color = QtWidgets.QLabel()\n state_color.setFixedWidth(5)\n state_color.setStyleSheet(\"background-color: {};\".format('#999999'))\n state_color.setObjectName('state_color')\n layout.addWidget(state_color)\n\n category = QtWidgets.QLabel(file_name)\n category.setObjectName('category')\n layout.addWidget(category)\n\n item_widget.setLayout(layout)\n return item, item_widget\n\n def update_widget(self):\n self.listWidget.clear()\n if self.mainwindow.files_list is None:\n return\n\n for file_path in self.mainwindow.files_list:\n _, file_name = os.path.split(file_path)\n item = QtWidgets.QListWidgetItem()\n item.setSizeHint(QtCore.QSize(200, 30))\n # item, item_widget = self.generate_item_and_itemwidget(file_name)\n\n item.setText(file_name)\n self.listWidget.addItem(item)\n # self.listWidget.setItemWidget(item, item_widget)\n\n self.label_all.setText('{}'.format(len(self.mainwindow.files_list)))\n\n def set_select(self, row):\n self.listWidget.setCurrentRow(row)\n\n def listwidget_doubleclick(self):\n row = self.listWidget.currentRow()\n self.mainwindow.current_index = row\n self.mainwindow.show_image(row)" }, { "identifier": "AnnotationScene", "path": "ISAT/widgets/canvas.py", "snippet": "class AnnotationScene(QtWidgets.QGraphicsScene):\n def __init__(self, mainwindow):\n super(AnnotationScene, self).__init__()\n self.mainwindow = mainwindow\n self.image_item:QtWidgets.QGraphicsPixmapItem = None\n self.image_data = None\n self.current_graph:QGraphicsRectItem = None\n self.mode = STATUSMode.VIEW\n self.click = CLICKMode.POSITIVE\n self.click_points = []\n\n self.mask_alpha = 0.5\n self.top_layer = 1\n\n self.guide_line_x:QtWidgets.QGraphicsLineItem = None\n self.guide_line_y:QtWidgets.QGraphicsLineItem = None\n\n # 拖动鼠标描点 \n self.last_draw_time = time.time()\n self.draw_interval = 0.15\n self.pressd = False\n\n def load_image(self, image_path:str):\n self.clear()\n\n self.image_data = np.array(Image.open(image_path))\n \n self.image_item = QtWidgets.QGraphicsPixmapItem()\n self.image_item.setZValue(0)\n self.addItem(self.image_item)\n self.image_item.setPixmap(QtGui.QPixmap(image_path))\n self.setSceneRect(self.image_item.boundingRect())\n \n def start_draw_polygon(self):\n if self.mode != STATUSMode.VIEW:\n return\n self.change_mode_to_create()\n if self.mode == STATUSMode.CREATE:\n self.start_draw()\n \n def start_draw(self):\n print('start_draw')\n self.current_graph = QGraphicsRectItem()\n self.addItem(self.current_graph)\n \n def change_mode_to_view(self):\n self.mode = STATUSMode.VIEW\n self.image_item.setCursor(QtGui.QCursor(QtCore.Qt.CursorShape.ArrowCursor))\n self.mainwindow.actionPrev.setEnabled(True)\n self.mainwindow.actionNext.setEnabled(True)\n\n self.mainwindow.actionPolygon.setEnabled(self.mainwindow.can_be_annotated)\n self.mainwindow.actionBackspace.setEnabled(False)\n self.mainwindow.actionFinish.setEnabled(False)\n self.mainwindow.actionCancel.setEnabled(False)\n\n self.mainwindow.actionEdit.setEnabled(False)\n self.mainwindow.actionDelete.setEnabled(False)\n self.mainwindow.actionSave.setEnabled(self.mainwindow.can_be_annotated)\n\n def change_mode_to_create(self):\n if self.image_item is None:\n return\n self.mode = STATUSMode.CREATE\n self.image_item.setCursor(QtGui.QCursor(QtCore.Qt.CursorShape.CrossCursor))\n self.mainwindow.actionPrev.setEnabled(False)\n self.mainwindow.actionNext.setEnabled(False)\n\n self.mainwindow.actionPolygon.setEnabled(False)\n 
self.mainwindow.actionBackspace.setEnabled(True)\n self.mainwindow.actionFinish.setEnabled(True)\n self.mainwindow.actionCancel.setEnabled(True)\n\n self.mainwindow.actionEdit.setEnabled(False)\n self.mainwindow.actionDelete.setEnabled(False)\n self.mainwindow.actionSave.setEnabled(False)\n\n def finish_draw(self):\n print('finish_draw')\n print(self.click_points)\n\n if self.current_graph is None:\n self.click_points.clear()\n return\n \n # 保存当前矩形\n print(self.click_points)\n print(self.mainwindow.rects)\n rect = {\n \"point1-x\": self.click_points[0][0],\n \"point1-y\": self.click_points[0][1],\n \"point2-x\": self.click_points[1][0],\n \"point2-y\": self.click_points[1][1],\n }\n print(rect)\n self.mainwindow.rects.append(rect)\n\n # 删除当前绘制对象\n self.click_points.clear()\n self.removeItem(self.current_graph)\n self.current_graph = None\n\n self.change_mode_to_view()\n\n\n def cancel_draw(self):\n if self.current_graph is None:\n return\n self.removeItem(self.current_graph)\n self.current_graph = None\n self.change_mode_to_view()\n self.click_points.clear()\n \n\n def mousePressEvent(self, event: 'QtWidgets.QGraphicsSceneMouseEvent'):\n if self.mode == STATUSMode.VIEW:\n return\n sceneX, sceneY = event.scenePos().x(), event.scenePos().y()\n sceneX = 0 if sceneX < 0 else sceneX\n sceneX = self.width()-1 if sceneX > self.width()-1 else sceneX\n sceneY = 0 if sceneY < 0 else sceneY\n sceneY = self.height()-1 if sceneY > self.height()-1 else sceneY\n print(sceneX, sceneY)\n\n if event.button() == QtCore.Qt.MouseButton.LeftButton:\n print('left click')\n self.pressd = True\n\n if len(self.click_points) <= 2:\n self.click_points.append([sceneX, sceneY])\n\n if len(self.click_points) == 2:\n pen = QPen(Qt.red)\n pen.setWidth(5)\n brush = QBrush(QColor(255, 255, 255, 128))\n\n p1 = self.click_points[0]\n p2 = self.click_points[1]\n self.current_graph.setPen(pen)\n self.current_graph.setBrush(brush)\n self.current_graph.setRect(p1[0], p1[1], p2[0]-p1[0], p2[1]-p1[1])\n super(AnnotationScene, self).mousePressEvent(event)\n\n # 拖动鼠标描点 \n def mouseReleaseEvent(self, event: 'QtWidgets.QGraphicsSceneMouseEvent'): \n self.pressd = False\n super(AnnotationScene, self).mouseReleaseEvent(event)\n \n def eventFilter(self, obj, event):\n if event.type() == QEvent.GraphicsSceneMouseMove and event.buttons() == Qt.LeftButton:\n self.mouseMoveEvent(event)\n return True\n return super(RectangleScene, self).eventFilter(obj, event)\n\n def mouseMoveEvent(self, event: 'QtWidgets.QGraphicsSceneMouseEvent'):\n # 拖动鼠标描点\n pos = event.scenePos()\n if pos.x() < 0: pos.setX(0)\n if pos.x() > self.width()-1: pos.setX(self.width()-1)\n if pos.y() < 0: pos.setY(0)\n if pos.y() > self.height()-1: pos.setY(self.height()-1)\n\n if len(self.click_points) == 1:\n pen = QPen(Qt.red)\n pen.setWidth(5)\n brush = QBrush(QColor(255, 255, 255, 128))\n\n p1 = self.click_points[0]\n p2 = [pos.x(), pos.y()]\n self.current_graph.setPen(pen)\n self.current_graph.setBrush(brush)\n self.current_graph.setRect(p1[0], p1[1], p2[0]-p1[0], p2[1]-p1[1])\n else:\n return\n\n # 状态栏,显示当前坐标\n if self.image_data is not None:\n x, y = round(pos.x()), round(pos.y())\n self.mainwindow.labelCoord.setText('xy: ({:>4d},{:>4d})'.format(x, y))\n\n data = self.image_data[y][x]\n if self.image_data.ndim == 2:\n self.mainwindow.labelData.setText('pix: [{:^3d}]'.format(data))\n elif self.image_data.ndim == 3:\n if len(data) == 3:\n self.mainwindow.labelData.setText('rgb: [{:>3d},{:>3d},{:>3d}]'.format(data[0], data[1], data[2]))\n else:\n 
self.mainwindow.labelData.setText('pix: [{}]'.format(data))\n\n super(AnnotationScene, self).mouseMoveEvent(event)\n \n def show_all(self):\n print('show_all')\n\n pen = QPen(Qt.red)\n pen.setWidth(5)\n brush = QBrush(QColor(255, 255, 255, 128))\n\n for rect in self.mainwindow.rects:\n self.current_graph = QGraphicsRectItem()\n self.addItem(self.current_graph)\n p1 = [rect[\"point1-x\"], rect[\"point1-y\"]]\n p2 = [rect[\"point2-x\"], rect[\"point2-y\"]]\n self.current_graph.setPen(pen)\n self.current_graph.setBrush(brush)\n self.current_graph.setRect(p1[0], p1[1], p2[0]-p1[0], p2[1]-p1[1])\n\n def hide_all(self):\n print('hide_all')\n items_to_remove = [item for item in self.items() if isinstance(item, QGraphicsRectItem)]\n for item in items_to_remove:\n self.removeItem(item)" }, { "identifier": "AnnotationView", "path": "ISAT/widgets/canvas.py", "snippet": "class AnnotationView(QtWidgets.QGraphicsView):\n def __init__(self, parent=None):\n super(AnnotationView, self).__init__(parent)\n self.setMouseTracking(True)\n self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarPolicy.ScrollBarAlwaysOn)\n self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarPolicy.ScrollBarAlwaysOn)\n self.setDragMode(QtWidgets.QGraphicsView.DragMode.ScrollHandDrag)\n self.factor = 1.2\n\n def wheelEvent(self, event: QtGui.QWheelEvent):\n angel = event.angleDelta()\n angelX, angelY = angel.x(), angel.y()\n point = event.pos() # 当前鼠标位置\n if angelY > 0:\n self.zoom(self.factor, point)\n else:\n self.zoom(1 / self.factor, point)\n\n def zoom_in(self):\n self.zoom(self.factor)\n\n def zoom_out(self):\n self.zoom(1/self.factor)\n\n def zoomfit(self):\n self.fitInView(0, 0, self.scene().width(), self.scene().height(), QtCore.Qt.AspectRatioMode.KeepAspectRatio)\n\n def zoom(self, factor, point=None):\n mouse_old = self.mapToScene(point) if point is not None else None\n # 缩放比例\n\n pix_widget = self.transform().scale(factor, factor).mapRect(QtCore.QRectF(0, 0, 1, 1)).width()\n if pix_widget > 30 and factor > 1: return\n if pix_widget < 0.01 and factor < 1: return\n\n self.scale(factor, factor)\n if point is not None:\n mouse_now = self.mapToScene(point)\n center_now = self.mapToScene(self.viewport().width() // 2, self.viewport().height() // 2)\n center_new = mouse_old - mouse_now + center_now\n self.centerOn(center_new)" }, { "identifier": "STATUSMode", "path": "ISAT/configs.py", "snippet": "class STATUSMode(Enum):\n VIEW = 0\n CREATE = 1\n EDIT = 2" }, { "identifier": "MAPMode", "path": "ISAT/configs.py", "snippet": "class MAPMode(Enum):\n LABEL = 0\n SEMANTIC = 1\n INSTANCE = 2" }, { "identifier": "load_config", "path": "ISAT/configs.py", "snippet": "def load_config(file):\n with open(file, 'rb')as f:\n cfg = yaml.load(f.read(), Loader=yaml.FullLoader)\n return cfg" }, { "identifier": "save_config", "path": "ISAT/configs.py", "snippet": "def save_config(cfg, file):\n s = yaml.dump(cfg)\n with open(file, 'w') as f:\n f.write(s)\n return True" }, { "identifier": "CONFIG_FILE", "path": "ISAT/configs.py", "snippet": "CONFIG_FILE = os.path.join(ISAT_ROOT, 'isat.yaml')" }, { "identifier": "DEFAULT_CONFIG_FILE", "path": "ISAT/configs.py", "snippet": "DEFAULT_CONFIG_FILE = os.path.join(ISAT_ROOT, 'default.yaml')" }, { "identifier": "CHECKPOINT_PATH", "path": "ISAT/configs.py", "snippet": "CHECKPOINT_PATH = os.path.join(ISAT_ROOT, 'checkpoints')" }, { "identifier": "ISAT_ROOT", "path": "ISAT/configs.py", "snippet": "ISAT_ROOT = os.path.split(os.path.abspath(__file__))[0]" }, { "identifier": "Object", "path": "ISAT/annotation.py", 
"snippet": "class Object:\n def __init__(self, category:str, group:int, segmentation, area, layer, bbox, iscrowd=0, note=''):\n self.category = category\n self.group = group\n self.segmentation = segmentation\n self.area = area\n self.layer = layer\n self.bbox = bbox\n self.iscrowd = iscrowd\n self.note = note" }, { "identifier": "Annotation", "path": "ISAT/annotation.py", "snippet": "class Annotation:\n def __init__(self, image_path, label_path):\n img_folder, img_name = os.path.split(image_path)\n self.description = 'ISAT'\n self.img_folder = img_folder\n self.img_name = img_name\n self.label_path = label_path\n self.note = ''\n\n image = np.array(Image.open(image_path))\n if image.ndim == 3:\n self.height, self.width, self.depth = image.shape\n elif image.ndim == 2:\n self.height, self.width = image.shape\n self.depth = 0\n else:\n self.height, self.width, self.depth = image.shape[:, :3]\n print('Warning: Except image has 2 or 3 ndim, but get {}.'.format(image.ndim))\n del image\n\n self.objects:List[Object,...] = []\n\n def load_annotation(self):\n if os.path.exists(self.label_path):\n with open(self.label_path, 'r') as f:\n dataset = load(f)\n info = dataset.get('info', {})\n description = info.get('description', '')\n if description == 'ISAT':\n # ISAT格式json\n objects = dataset.get('objects', [])\n self.img_name = info.get('name', '')\n width = info.get('width', None)\n if width is not None:\n self.width = width\n height = info.get('height', None)\n if height is not None:\n self.height = height\n depth = info.get('depth', None)\n if depth is not None:\n self.depth = depth\n self.note = info.get('note', '')\n for obj in objects:\n category = obj.get('category', 'unknow')\n group = obj.get('group', 0)\n if group is None: group = 0\n segmentation = obj.get('segmentation', [])\n iscrowd = obj.get('iscrowd', 0)\n note = obj.get('note', '')\n area = obj.get('area', 0)\n layer = obj.get('layer', 2)\n bbox = obj.get('bbox', [])\n obj = Object(category, group, segmentation, area, layer, bbox, iscrowd, note)\n self.objects.append(obj)\n else:\n # 不再支持直接打开labelme标注文件(在菜单栏-tool-convert中提供了isat<->labelme相互转换工具)\n print('Warning: The file {} is not a ISAT json.'.format(self.label_path))\n return self\n\n def save_annotation(self):\n dataset = {}\n dataset['info'] = {}\n dataset['info']['description'] = self.description\n dataset['info']['folder'] = self.img_folder\n dataset['info']['name'] = self.img_name\n dataset['info']['width'] = self.width\n dataset['info']['height'] = self.height\n dataset['info']['depth'] = self.depth\n dataset['info']['note'] = self.note\n dataset['objects'] = []\n for obj in self.objects:\n object = {}\n object['category'] = obj.category\n object['group'] = obj.group\n object['segmentation'] = obj.segmentation\n object['area'] = obj.area\n object['layer'] = obj.layer\n object['bbox'] = obj.bbox\n object['iscrowd'] = obj.iscrowd\n object['note'] = obj.note\n dataset['objects'].append(object)\n with open(self.label_path, 'w') as f:\n dump(dataset, f, indent=4)\n return True" }, { "identifier": "Polygon", "path": "ISAT/widgets/polygon.py", "snippet": "class Polygon(QtWidgets.QGraphicsPolygonItem):\n def __init__(self):\n super(Polygon, self).__init__(parent=None)\n self.line_width = 0\n self.hover_alpha = 150\n self.nohover_alpha = 80\n self.points = []\n self.vertexs = []\n self.category = ''\n self.group = 0\n self.iscrowd = 0\n self.note = ''\n\n self.rxmin, self.rxmax, self.rymin, self.rymax = 0, 0, 0, 0 # 用于绘画完成后,记录多边形的各边界,此处与points对应\n self.color = 
QtGui.QColor('#ff0000')\n self.is_drawing = True\n\n self.setPen(QtGui.QPen(self.color, self.line_width))\n self.setBrush(QtGui.QBrush(self.color, QtCore.Qt.BrushStyle.FDiagPattern))\n\n self.setAcceptHoverEvents(True)\n self.setFlag(QtWidgets.QGraphicsItem.GraphicsItemFlag.ItemIsSelectable, True)\n self.setFlag(QtWidgets.QGraphicsItem.GraphicsItemFlag.ItemIsMovable, True)\n self.setFlag(QtWidgets.QGraphicsItem.GraphicsItemFlag.ItemSendsGeometryChanges, True)\n self.setZValue(1e5)\n\n def addPoint(self, point):\n print('addPoint')\n self.points.append(point)\n print(self.points)\n vertex = Vertex(self, self.color, 2)\n # 添加路径点\n self.scene().addItem(vertex)\n self.vertexs.append(vertex)\n vertex.setPos(point)\n\n def movePoint(self, index, point):\n if not 0 <= index < len(self.points):\n return\n self.points[index] = self.mapFromScene(point)\n\n self.redraw()\n if self.scene().mainwindow.load_finished and not self.is_drawing:\n self.scene().mainwindow.set_saved_state(False)\n\n def removePoint(self, index):\n if not self.points:\n return\n self.points.pop(index)\n vertex = self.vertexs.pop(index)\n self.scene().removeItem(vertex)\n del vertex\n self.redraw()\n\n def delete(self):\n self.points.clear()\n while self.vertexs:\n vertex = self.vertexs.pop()\n self.scene().removeItem(vertex)\n del vertex\n\n def moveVertex(self, index, point):\n if not 0 <= index < len(self.vertexs):\n return\n vertex = self.vertexs[index]\n vertex.setEnabled(False)\n vertex.setPos(point)\n vertex.setEnabled(True)\n\n def itemChange(self, change: 'QGraphicsItem.GraphicsItemChange', value: typing.Any):\n if change == QtWidgets.QGraphicsItem.GraphicsItemChange.ItemSelectedHasChanged and not self.is_drawing: # 选中改变\n if self.isSelected():\n color = QtGui.QColor('#00A0FF')\n color.setAlpha(self.hover_alpha)\n self.setBrush(color)\n else:\n self.color.setAlpha(self.nohover_alpha)\n self.setBrush(self.color)\n self.scene().mainwindow.annos_dock_widget.set_selected(self) # 更新label面板\n\n if change == QtWidgets.QGraphicsItem.GraphicsItemChange.ItemPositionChange: # ItemPositionHasChanged\n bias = value\n l, t, b, r = self.boundingRect().left(), self.boundingRect().top(), self.boundingRect().bottom(), self.boundingRect().right()\n if l + bias.x() < 0: bias.setX(-l)\n if r + bias.x() > self.scene().width(): bias.setX(self.scene().width()-r)\n if t + bias.y() < 0: bias.setY(-t)\n if b + bias.y() > self.scene().height(): bias.setY(self.scene().height()-b)\n\n for index, point in enumerate(self.points):\n self.moveVertex(index, point+bias)\n\n if self.scene().mainwindow.load_finished and not self.is_drawing:\n self.scene().mainwindow.set_saved_state(False)\n\n return super(Polygon, self).itemChange(change, value)\n\n def hoverEnterEvent(self, event: 'QGraphicsSceneHoverEvent'):\n if not self.is_drawing and not self.isSelected():\n self.color.setAlpha(self.hover_alpha)\n self.setBrush(self.color)\n super(Polygon, self).hoverEnterEvent(event)\n\n def hoverLeaveEvent(self, event: 'QGraphicsSceneHoverEvent'):\n if not self.is_drawing and not self.isSelected():\n self.color.setAlpha(self.nohover_alpha)\n self.setBrush(self.color)\n super(Polygon, self).hoverEnterEvent(event)\n\n def mouseDoubleClickEvent(self, event: 'QGraphicsSceneMouseEvent'):\n if event.button() == QtCore.Qt.MouseButton.LeftButton:\n self.scene().mainwindow.category_edit_widget.polygon = self\n self.scene().mainwindow.category_edit_widget.load_cfg()\n self.scene().mainwindow.category_edit_widget.show()\n\n def redraw(self):\n if len(self.points) < 1:\n 
return\n xs = [p.x() for p in self.points]\n ys = [p.y() for p in self.points]\n self.rxmin, self.rymin, self.rxmax, self.rymax = min(xs), min(ys), max(xs), max(ys)\n self.setPolygon(QtGui.QPolygonF(self.points))\n\n def change_color(self, color):\n self.color = color\n self.color.setAlpha(self.nohover_alpha)\n self.setPen(QtGui.QPen(self.color, self.line_width))\n self.setBrush(self.color)\n for vertex in self.vertexs:\n vertex_color = self.color\n vertex_color.setAlpha(255)\n vertex.setPen(QtGui.QPen(vertex_color, self.line_width))\n vertex.setBrush(vertex_color)\n\n def set_drawed(self, category, group, iscrowd, note, color:QtGui.QColor, layer=None):\n self.is_drawing = False\n self.category = category\n if isinstance(group, str):\n group = 0 if group == '' else int(group)\n self.group = group\n self.iscrowd = iscrowd\n self.note = note\n\n self.color = color\n self.color.setAlpha(self.nohover_alpha)\n self.setPen(QtGui.QPen(self.color, self.line_width))\n self.setBrush(self.color)\n if layer is not None:\n self.setZValue(layer)\n for vertex in self.vertexs:\n vertex.setColor(color)\n\n def calculate_area(self):\n area = 0\n num_points = len(self.points)\n for i in range(num_points):\n p1 = self.points[i]\n p2 = self.points[(i + 1) % num_points]\n d = p1.x() * p2.y() - p2.x() * p1.y()\n area += d\n return abs(area) / 2\n\n def load_object(self, object):\n segmentation = object.segmentation\n for x, y in segmentation:\n point = QtCore.QPointF(x, y)\n self.addPoint(point)\n color = self.scene().mainwindow.category_color_dict.get(object.category, '#000000')\n self.set_drawed(object.category, object.group, object.iscrowd, object.note, QtGui.QColor(color), object.layer) # ...\n\n def to_object(self):\n if self.is_drawing:\n return None\n segmentation = []\n for point in self.points:\n point = point + self.pos()\n segmentation.append((round(point.x(), 2), round(point.y(), 2)))\n xmin = self.boundingRect().x() + self.pos().x()\n ymin = self.boundingRect().y() + self.pos().y()\n xmax = xmin + self.boundingRect().width()\n ymax = ymin + self.boundingRect().height()\n\n object = Object(self.category, group=self.group, segmentation=segmentation,\n area=self.calculate_area(), layer=self.zValue(), bbox=(xmin, ymin, xmax, ymax), iscrowd=self.iscrowd, note=self.note)\n return object" }, { "identifier": "PromptPoint", "path": "ISAT/widgets/polygon.py", "snippet": "class PromptPoint(QtWidgets.QGraphicsPathItem):\n def __init__(self, pos, type=0):\n super(PromptPoint, self).__init__()\n self.color = QtGui.QColor('#0000FF') if type==0 else QtGui.QColor('#00FF00')\n self.color.setAlpha(255)\n self.painterpath = QtGui.QPainterPath()\n self.painterpath.addEllipse(\n QtCore.QRectF(-1, -1, 2, 2))\n self.setPath(self.painterpath)\n self.setBrush(self.color)\n self.setPen(QtGui.QPen(self.color, 3))\n self.setZValue(1e5)\n\n self.setPos(pos)" }, { "identifier": "ConverterDialog", "path": "ISAT/widgets/converter_dialog.py", "snippet": "class ConverterDialog(QtWidgets.QDialog, Ui_Dialog):\n def __init__(self, parent, mainwindow):\n super(ConverterDialog, self).__init__(parent=parent)\n self.setWindowTitle('转换')\n self.layout = QVBoxLayout()\n self.mainwindow = mainwindow\n self.setWindowModality(QtCore.Qt.WindowModality.WindowModal)\n\n self.path_layout = QHBoxLayout()\n self.button = QPushButton('保存至')\n self.button.clicked.connect(self.select_folder)\n self.path_layout.addWidget(self.button)\n self.path_text = QLineEdit()\n self.path_text.setReadOnly(True)\n self.path_layout.addWidget(self.path_text)\n 
self.layout.addLayout(self.path_layout)\n\n\n # 最底部居中按钮\n self.bottom_layout = QHBoxLayout()\n self.bottom_layout.addStretch()\n self.bottom_button = QPushButton('转换')\n self.bottom_layout.addWidget(self.bottom_button)\n self.bottom_layout.addStretch()\n self.layout.addLayout(self.bottom_layout)\n self.bottom_button.clicked.connect(self.confirm_action)\n self.setLayout(self.layout)\n\n def select_folder(self):\n folder = QFileDialog.getExistingDirectory(self, '保存至')\n if folder:\n self.path_text.setText(folder)\n\n def confirm_action(self):\n path = self.path_text.text()\n if path == '':\n self.mainwindow.statusBar().showMessage('请先选择保存路径')\n QMessageBox.warning(self, '警告', '请先选择保存路径')\n return\n if not os.path.exists(path):\n os.makedirs(path)\n self.mainwindow.statusBar().showMessage('正在转换')\n labels_dir = self.mainwindow.label_root\n image_dir = self.mainwindow.image_root\n for inx, label in enumerate(os.listdir(labels_dir)):\n print(inx, label)\n label_path = os.path.join(labels_dir, label)\n image_path = os.path.join(image_dir, label[:-5] + '.jpg')\n if not os.path.exists(image_path):\n image_path = os.path.join(image_dir, label[:-5] + '.png')\n if not os.path.exists(image_path):\n image_path = os.path.join(image_dir, label[:-5] + '.jpeg')\n if not os.path.exists(image_path):\n continue\n image = Image.open(image_path)\n with open(label_path, 'r') as f:\n rects = json.load(f)\n \n for inx, rect in enumerate(rects):\n x1, y1, x2, y2 = rect['point1-x'], rect['point1-y'], rect['point2-x'], rect['point2-y']\n left = min(x1, x2)\n right = max(x1, x2)\n top = min(y1, y2)\n bottom = max(y1, y2)\n cropped_image = image.crop((left, top, right, bottom))\n save_path = os.path.join(path, label[:-5] + '_' + str(inx) + image_path[-4:])\n print(save_path)\n cropped_image.save(save_path)\n\n self.mainwindow.statusBar().showMessage('转换完成')\n QMessageBox.warning(self, '提示', '转换完成')" } ]
from PyQt5 import QtWidgets, QtCore, QtGui
from ISAT.ui.MainWindow import Ui_MainWindow
from ISAT.widgets.files_dock_widget import FilesDockWidget
from ISAT.widgets.canvas import AnnotationScene, AnnotationView
from ISAT.configs import STATUSMode, MAPMode, load_config, save_config, CONFIG_FILE, DEFAULT_CONFIG_FILE, CHECKPOINT_PATH, ISAT_ROOT
from ISAT.annotation import Object, Annotation
from ISAT.widgets.polygon import Polygon, PromptPoint
from ISAT.widgets.converter_dialog import ConverterDialog
from PIL import Image
from PyQt5.QtCore import QThread, pyqtSignal
import os
import json
import functools
import imgviz
import ISAT.icons_rc
import numpy as np
import cv2  # 调整图像饱和度
12655
# -*- coding: utf-8 -*-
# @Author : LG

class MainWindow(QtWidgets.QMainWindow, Ui_MainWindow):
    def __init__(self):
        super(MainWindow, self).__init__()
        self.setupUi(self)
        self.image_root: str = None
        self.label_root:str = None
        self.files_list: list = []
        self.current_index = None
        self.current_file_index: int = None
        self.current_group = 1
        self.config_file = CONFIG_FILE if os.path.exists(CONFIG_FILE) else DEFAULT_CONFIG_FILE
        self.saved = True
        self.can_be_annotated = True
        self.load_finished = False
        self.png_palette = None  # 图像拥有调色盘,说明是单通道的标注png文件
        self.instance_cmap = imgviz.label_colormap()
        # 标注目标
        self.current_label:Annotation = None
        # 新增 手动/自动 group选择
        self.group_select_mode = 'auto'
        # 所有labels
        self.rects = []
        self.is_show_bitmap = False

        self.init_ui()
        self.init_connect()
        self.reset_action()

    def init_ui(self):
        #q
# -*- coding: utf-8 -*-
# @Author : LG

class MainWindow(QtWidgets.QMainWindow, Ui_MainWindow):
    def __init__(self):
        super(MainWindow, self).__init__()
        self.setupUi(self)
        self.image_root: str = None
        self.label_root:str = None
        self.files_list: list = []
        self.current_index = None
        self.current_file_index: int = None
        self.current_group = 1
        self.config_file = CONFIG_FILE if os.path.exists(CONFIG_FILE) else DEFAULT_CONFIG_FILE
        self.saved = True
        self.can_be_annotated = True
        self.load_finished = False
        self.png_palette = None  # 图像拥有调色盘,说明是单通道的标注png文件
        self.instance_cmap = imgviz.label_colormap()
        # 标注目标
        self.current_label:Annotation = None
        # 新增 手动/自动 group选择
        self.group_select_mode = 'auto'
        # 所有labels
        self.rects = []
        self.is_show_bitmap = False

        self.init_ui()
        self.init_connect()
        self.reset_action()

    def init_ui(self):
        #q
self.files_dock_widget = FilesDockWidget(mainwindow=self)
1
2023-12-24 16:19:16+00:00
16k
facebookresearch/ca_body
ca_body/models/mesh_vae_drivable.py
[ { "identifier": "ConvBlock", "path": "ca_body/nn/blocks.py", "snippet": "class ConvBlock(nn.Module):\n def __init__(\n self,\n in_channels,\n out_channels,\n size,\n lrelu_slope=0.2,\n kernel_size=3,\n padding=1,\n wnorm_dim=0,\n ):\n super().__init__()\n\n Conv2dWNUB = weight_norm_wrapper(la.Conv2dUB, \"Conv2dWNUB\", g_dim=wnorm_dim, v_dim=None)\n Conv2dWN = weight_norm_wrapper(th.nn.Conv2d, \"Conv2dWN\", g_dim=wnorm_dim, v_dim=None)\n\n # TODO: do we really need this?\n self.conv_resize = Conv2dWN(in_channels, out_channels, kernel_size=1)\n self.conv1 = Conv2dWNUB(\n in_channels,\n in_channels,\n kernel_size=kernel_size,\n padding=padding,\n height=size,\n width=size,\n )\n\n self.lrelu1 = nn.LeakyReLU(lrelu_slope)\n self.conv2 = Conv2dWNUB(\n in_channels,\n out_channels,\n kernel_size=kernel_size,\n padding=padding,\n height=size,\n width=size,\n )\n self.lrelu2 = nn.LeakyReLU(lrelu_slope)\n\n def forward(self, x):\n x_skip = self.conv_resize(x)\n x = self.conv1(x)\n x = self.lrelu1(x)\n x = self.conv2(x)\n x = self.lrelu2(x)\n return x + x_skip" }, { "identifier": "ConvDownBlock", "path": "ca_body/nn/blocks.py", "snippet": "class ConvDownBlock(nn.Module):\n def __init__(self, in_channels, out_channels, size, lrelu_slope=0.2, groups=1, wnorm_dim=0):\n \"\"\"Constructor.\n\n Args:\n in_channels: int, # of input channels\n out_channels: int, # of input channels\n size: the *input* size\n \"\"\"\n super().__init__()\n\n Conv2dWNUB = weight_norm_wrapper(la.Conv2dUB, \"Conv2dWNUB\", g_dim=wnorm_dim, v_dim=None)\n Conv2dWN = weight_norm_wrapper(th.nn.Conv2d, \"Conv2dWN\", g_dim=wnorm_dim, v_dim=None)\n\n self.conv_resize = Conv2dWN(\n in_channels, out_channels, kernel_size=1, stride=2, groups=groups\n )\n self.conv1 = Conv2dWNUB(\n in_channels,\n in_channels,\n kernel_size=3,\n height=size,\n width=size,\n groups=groups,\n padding=1,\n )\n self.lrelu1 = nn.LeakyReLU(lrelu_slope)\n\n self.conv2 = Conv2dWNUB(\n in_channels,\n out_channels,\n kernel_size=3,\n stride=2,\n height=size // 2,\n width=size // 2,\n groups=groups,\n padding=1,\n )\n self.lrelu2 = nn.LeakyReLU(lrelu_slope)\n\n def forward(self, x):\n x_skip = self.conv_resize(x)\n x = self.conv1(x)\n x = self.lrelu1(x)\n x = self.conv2(x)\n x = self.lrelu2(x)\n return x + x_skip" }, { "identifier": "UpConvBlockDeep", "path": "ca_body/nn/blocks.py", "snippet": "class UpConvBlockDeep(nn.Module):\n def __init__(self, in_channels, out_channels, size, lrelu_slope=0.2, wnorm_dim=0, groups=1):\n super().__init__()\n self.upsample = nn.UpsamplingBilinear2d(size)\n\n Conv2dWNUB = weight_norm_wrapper(la.Conv2dUB, \"Conv2dWNUB\", g_dim=wnorm_dim, v_dim=None)\n Conv2dWN = weight_norm_wrapper(th.nn.Conv2d, \"Conv2dWN\", g_dim=wnorm_dim, v_dim=None)\n # NOTE: the old one normalizes only across one dimension\n\n self.conv_resize = Conv2dWN(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=1,\n groups=groups,\n )\n self.conv1 = Conv2dWNUB(\n in_channels,\n in_channels,\n kernel_size=3,\n height=size,\n width=size,\n padding=1,\n groups=groups,\n )\n self.lrelu1 = nn.LeakyReLU(lrelu_slope)\n self.conv2 = Conv2dWNUB(\n in_channels,\n out_channels,\n kernel_size=3,\n height=size,\n width=size,\n padding=1,\n groups=groups,\n )\n self.lrelu2 = nn.LeakyReLU(lrelu_slope)\n\n def forward(self, x):\n x_up = self.upsample(x)\n x_skip = self.conv_resize(x_up)\n\n x = x_up\n x = self.conv1(x)\n x = self.lrelu1(x)\n x = self.conv2(x)\n x = self.lrelu2(x)\n\n return x + x_skip" }, { "identifier": "tile2d", "path": "ca_body/nn/blocks.py", 
"snippet": "def tile2d(x, size: int):\n \"\"\"Tile a given set of features into a convolutional map.\n\n Args:\n x: float tensor of shape [N, F]\n size: int or a tuple\n\n Returns:\n a feature map [N, F, size[0], size[1]]\n \"\"\"\n # size = size if isinstance(size, tuple) else (size, size)\n # NOTE: expecting only int here (!!!)\n return x[:, :, np.newaxis, np.newaxis].expand(-1, -1, size, size)" }, { "identifier": "weights_initializer", "path": "ca_body/nn/blocks.py", "snippet": "def weights_initializer(lrelu_slope=0.2):\n # pyre-ignore\n def init_fn(m):\n if isinstance(\n m,\n (\n nn.Conv2d,\n nn.Conv1d,\n nn.ConvTranspose2d,\n nn.Linear,\n ),\n ):\n gain = nn.init.calculate_gain(\"leaky_relu\", lrelu_slope)\n nn.init.kaiming_uniform_(m.weight.data, a=gain)\n if hasattr(m, \"bias\") and m.bias is not None:\n nn.init.zeros_(m.bias.data)\n else:\n logger.debug(f\"skipping initialization for {m}\")\n\n return init_fn" }, { "identifier": "LearnableBlur", "path": "ca_body/nn/dof_cal.py", "snippet": "class LearnableBlur(nn.Module):\n # TODO: should we make this conditional?\n def __init__(self, cameras: List[str]) -> None:\n super().__init__()\n self.cameras = cameras\n self.register_parameter(\n \"weights_raw\", nn.Parameter(th.ones(len(cameras), 3, dtype=th.float32))\n )\n\n def name_to_idx(self, cameras: List[str]) -> th.Tensor:\n return th.tensor(\n [self.cameras.index(c) for c in cameras],\n device=self.weights_raw.device,\n dtype=th.long,\n )\n\n # pyre-ignore\n def reg(self, cameras: List[str]):\n # pyre-ignore\n idxs = self.name_to_idx(cameras)\n # pyre-ignore\n return self.weights_raw[idxs]\n\n # pyre-ignore\n def forward(self, img: th.Tensor, cameras: List[str]):\n B = img.shape[0]\n # B, C, H, W\n idxs = self.name_to_idx(cameras)\n # TODO: mask?\n # pyre-ignore\n weights = th.softmax(self.weights_raw[idxs], dim=-1)\n weights = weights.reshape(B, 3, 1, 1, 1)\n return (\n weights[:, 0] * img\n + weights[:, 1] * gaussian_blur(img, [3, 3])\n + weights[:, 2] * gaussian_blur(img, [7, 7])\n )" }, { "identifier": "GeometryModule", "path": "ca_body/utils/geom.py", "snippet": "class GeometryModule(nn.Module):\n def __init__(\n self,\n vi,\n vt,\n vti,\n v2uv,\n uv_size,\n flip_uv=False,\n impaint=False,\n impaint_threshold=100.0,\n ):\n super().__init__()\n\n self.register_buffer(\"vi\", th.as_tensor(vi))\n self.register_buffer(\"vt\", th.as_tensor(vt))\n self.register_buffer(\"vti\", th.as_tensor(vti))\n self.register_buffer(\"v2uv\", th.as_tensor(v2uv, dtype=th.int64))\n\n # TODO: should we just pass topology here?\n self.n_verts = v2uv.shape[0]\n\n self.uv_size = uv_size\n\n # TODO: can't we just index face_index?\n index_image = make_uv_vert_index(\n self.vt, self.vi, self.vti, uv_shape=uv_size, flip_uv=flip_uv\n ).cpu()\n face_index, bary_image = make_uv_barys(\n self.vt, self.vti, uv_shape=uv_size, flip_uv=flip_uv\n )\n if impaint:\n if uv_size >= 1024:\n logger.info(\n \"impainting index image might take a while for sizes >= 1024\"\n )\n\n index_image, bary_image = index_image_impaint(\n index_image, bary_image, impaint_threshold\n )\n # TODO: we can avoid doing this 2x\n face_index = index_image_impaint(\n face_index, distance_threshold=impaint_threshold\n )\n\n self.register_buffer(\"index_image\", index_image.cpu())\n self.register_buffer(\"bary_image\", bary_image.cpu())\n self.register_buffer(\"face_index_image\", face_index.cpu())\n\n def render_index_images(self, uv_size, flip_uv=False, impaint=False):\n index_image = make_uv_vert_index(\n self.vt, self.vi, self.vti, 
uv_shape=uv_size, flip_uv=flip_uv\n )\n face_image, bary_image = make_uv_barys(\n self.vt, self.vti, uv_shape=uv_size, flip_uv=flip_uv\n )\n\n if impaint:\n index_image, bary_image = index_image_impaint(\n index_image,\n bary_image,\n )\n\n return index_image, face_image, bary_image\n\n def vn(self, verts):\n return vert_normals(verts, self.vi[np.newaxis].to(th.long))\n\n def to_uv(self, values):\n return values_to_uv(values, self.index_image, self.bary_image)\n\n def from_uv(self, values_uv):\n # TODO: we need to sample this\n return sample_uv(values_uv, self.vt, self.v2uv.to(th.long))" }, { "identifier": "compute_view_cos", "path": "ca_body/utils/geom.py", "snippet": "def compute_view_cos(verts, faces, camera_pos):\n vn = F.normalize(vert_normals(verts, faces), dim=-1)\n v2c = F.normalize(verts - camera_pos[:, np.newaxis], dim=-1)\n return th.einsum(\"bnd,bnd->bn\", vn, v2c)" }, { "identifier": "depth_discontuity_mask", "path": "ca_body/utils/geom.py", "snippet": "def depth_discontuity_mask(\n depth: th.Tensor, threshold: float = 40.0, kscale: float = 4.0, pool_ksize: int = 3\n) -> th.Tensor:\n device = depth.device\n\n with th.no_grad():\n # TODO: pass the kernel?\n kernel = th.as_tensor(\n [\n [[[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]],\n [[[-1, -2, -1], [0, 0, 0], [1, 2, 1]]],\n ],\n dtype=th.float32,\n device=device,\n )\n\n disc_mask = (th.norm(F.conv2d(depth, kernel, bias=None, padding=1), dim=1) > threshold)[\n :, np.newaxis\n ]\n disc_mask = (\n F.avg_pool2d(disc_mask.float(), pool_ksize, stride=1, padding=pool_ksize // 2) > 0.0\n )\n\n return disc_mask" }, { "identifier": "depth2normals", "path": "ca_body/utils/geom.py", "snippet": "def depth2normals(depth, focal, princpt) -> th.Tensor:\n \"\"\"Convert depth image to normal image using camera intrinsics\n\n Args:\n depth: th.Tensor\n [B, 1, H, W] depth image\n\n focal: th.Tensor\n [B, 2, 2] camera focal lengths\n\n princpt: th.Tensor\n [B, 2] camera principal points\n\n Returns:\n th.Tensor: [B, 3, H, W] normal image\n \"\"\"\n\n return xyz2normals(depth2xyz(depth, focal, princpt))" }, { "identifier": "ShadowUNet", "path": "ca_body/nn/shadow.py", "snippet": "class ShadowUNet(nn.Module):\n def __init__(\n self,\n uv_size,\n ao_mean,\n shadow_size,\n lrelu_slope=0.2,\n beta=1.0,\n n_dims=64,\n interp_mode=\"bilinear\",\n biases=True,\n trainable_mean=False,\n ):\n super().__init__()\n\n # this is the size of the output\n self.uv_size = uv_size\n self.shadow_size = shadow_size\n\n ao_mean = F.interpolate(\n th.as_tensor(ao_mean)[np.newaxis],\n size=(self.shadow_size, self.shadow_size),\n )[0]\n if not trainable_mean:\n # TODO:\n self.register_buffer(\"ao_mean\", ao_mean)\n else:\n self.register_parameter(\"ao_mean\", th.nn.Parameter(ao_mean))\n\n self.depth = 3\n self.lrelu_slope = lrelu_slope\n self.interp_mode = interp_mode\n self.align_corners = None\n if interp_mode == \"bilinear\":\n self.align_corners = False\n\n # the base number of dimensions for the shadow maps\n n_dims = n_dims\n\n # TODO: generate this?\n self.n_enc_dims = [\n (1, n_dims),\n (n_dims, n_dims),\n (n_dims, n_dims),\n (n_dims, n_dims),\n ]\n\n self.sizes = [shadow_size // (2**i) for i in range(len(self.n_enc_dims))]\n\n logger.debug(f\"sizes: {self.sizes}\")\n\n self.enc_layers = nn.ModuleList()\n for i, size in enumerate(self.sizes):\n n_in, n_out = self.n_enc_dims[i]\n logger.debug(f\"EncoderLayers({i}): {n_in}, {n_out}, {size}\")\n self.enc_layers.append(\n nn.Sequential(\n la.Conv2dWNUB(\n n_in,\n n_out,\n kernel_size=3,\n height=size,\n width=size,\n 
stride=1,\n padding=1,\n ),\n nn.LeakyReLU(self.lrelu_slope, inplace=True),\n )\n )\n\n self.n_dec_dims = [\n (n_dims, n_dims),\n (n_dims * 2, n_dims),\n (n_dims * 2, n_dims),\n (n_dims * 2, n_dims),\n ]\n self.dec_layers = nn.ModuleList()\n for i in range(len(self.sizes)):\n size = self.sizes[-i - 1]\n n_in, n_out = self.n_dec_dims[i]\n logger.debug(f\"DecoderLayer({i}): {n_in}, {n_out}, {size}\")\n\n self.dec_layers.append(\n nn.Sequential(\n la.Conv2dWNUB(\n n_in,\n n_out,\n kernel_size=3,\n height=size,\n width=size,\n stride=1,\n padding=1,\n ),\n nn.LeakyReLU(self.lrelu_slope, inplace=True),\n )\n )\n\n self.apply(weights_initializer(self.lrelu_slope))\n\n if biases:\n self.shadow_pred = la.Conv2dWNUB(\n self.n_dec_dims[-1][-1],\n 1,\n kernel_size=3,\n height=self.sizes[0],\n width=self.sizes[0],\n stride=1,\n padding=1,\n )\n else:\n self.shadow_pred = la.Conv2dWN(\n self.n_dec_dims[-1][-1],\n 1,\n kernel_size=3,\n stride=1,\n padding=1,\n )\n\n self.shadow_pred.apply(weights_initializer(1.0))\n self.beta = beta\n\n def forward(self, ao_map):\n # resizing the inputs if necessary\n if ao_map.shape[-2:] != (self.shadow_size, self.shadow_size):\n ao_map = F.interpolate(ao_map, size=(self.shadow_size, self.shadow_size))\n\n x = ao_map - self.ao_mean\n\n enc_acts = []\n # unet enc\n for i, layer in enumerate(self.enc_layers):\n # TODO: try applying a 1D sparse op?\n x = layer(x)\n enc_acts.append(x)\n # TODO: add this layer elsewhere?\n if i < len(self.sizes) - 1:\n x = F.interpolate(\n x,\n scale_factor=0.5,\n mode=\"bilinear\",\n recompute_scale_factor=True,\n align_corners=True,\n )\n\n # we do not need the last one?\n for i, layer in enumerate(self.dec_layers):\n if i > 0:\n x_prev = enc_acts[-i - 1]\n x = F.interpolate(x, size=x_prev.shape[2:4], mode=\"bilinear\", align_corners=True)\n x = th.cat([x, x_prev], dim=1)\n x = layer(x)\n\n shadow_map_lowres = th.sigmoid(self.shadow_pred(x) + self.beta)\n shadow_map = F.interpolate(\n shadow_map_lowres,\n (self.uv_size, self.uv_size),\n mode=self.interp_mode,\n align_corners=self.align_corners,\n )\n\n return {\n \"shadow_map\": shadow_map,\n \"ao_map\": ao_map,\n \"shadow_map_lowres\": shadow_map_lowres,\n }" }, { "identifier": "PoseToShadow", "path": "ca_body/nn/shadow.py", "snippet": "class PoseToShadow(nn.Module):\n def __init__(\n self,\n n_pose_dims,\n uv_size,\n beta=1.0,\n ) -> None:\n super().__init__()\n self.n_pose_dims = n_pose_dims\n self.uv_size = uv_size\n\n self.fc_block = nn.Sequential(\n la.LinearWN(self.n_pose_dims, 256 * 4 * 4),\n nn.LeakyReLU(0.2),\n )\n self.conv_block = nn.Sequential(\n la.ConvTranspose2dWNUB(256, 256, 8, 8, 4, 2, 1),\n nn.LeakyReLU(0.2),\n la.ConvTranspose2dWNUB(256, 128, 16, 16, 4, 2, 1),\n nn.LeakyReLU(0.2),\n la.ConvTranspose2dWNUB(128, 128, 32, 32, 4, 2, 1),\n nn.LeakyReLU(0.2),\n la.ConvTranspose2dWNUB(128, 64, 64, 64, 4, 2, 1),\n nn.LeakyReLU(0.2),\n # la.ConvTranspose2dWNUB(64, 64, 128, 128, 4, 2, 1),\n # nn.LeakyReLU(0.2),\n # la.ConvTranspose2dWNUB(64, 1, 256, 256, 4, 2, 1),\n la.ConvTranspose2dWNUB(64, 1, 128, 128, 4, 2, 1),\n )\n self.beta = beta\n self.apply(lambda x: la.glorot(x, 0.2))\n la.glorot(self.conv_block[-1], 1.0)\n\n def forward(self, pose: th.Tensor):\n assert pose.shape\n x = self.fc_block(pose)\n x = self.conv_block(x.reshape(-1, 256, 4, 4))\n shadow_map_lowres = th.sigmoid(x + self.beta)\n\n shadow_map = F.interpolate(\n shadow_map_lowres, size=(self.uv_size, self.uv_size), mode=\"bilinear\"\n )\n return {\"shadow_map\": shadow_map}" }, { "identifier": "UNetWB", "path": 
"ca_body/nn/unet.py", "snippet": "class UNetWB(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n size: int,\n n_init_ftrs: int=8,\n out_scale: float =0.1,\n ):\n # super().__init__(*args, **kwargs)\n super().__init__()\n\n self.out_scale = out_scale\n\n F = n_init_ftrs\n\n self.size = size\n\n self.down1 = nn.Sequential(\n Conv2dWNUB(in_channels, F, self.size // 2, self.size // 2, 4, 2, 1),\n nn.LeakyReLU(0.2),\n )\n self.down2 = nn.Sequential(\n Conv2dWNUB(F, 2 * F, self.size // 4, self.size // 4, 4, 2, 1),\n nn.LeakyReLU(0.2),\n )\n self.down3 = nn.Sequential(\n Conv2dWNUB(2 * F, 4 * F, self.size // 8, self.size // 8, 4, 2, 1),\n nn.LeakyReLU(0.2),\n )\n self.down4 = nn.Sequential(\n Conv2dWNUB(4 * F, 8 * F, self.size // 16, self.size // 16, 4, 2, 1),\n nn.LeakyReLU(0.2),\n )\n self.down5 = nn.Sequential(\n Conv2dWNUB(8 * F, 16 * F, self.size // 32, self.size // 32, 4, 2, 1),\n nn.LeakyReLU(0.2),\n )\n self.up1 = nn.Sequential(\n ConvTranspose2dWNUB(16 * F, 8 * F, self.size // 16, self.size // 16, 4, 2, 1),\n nn.LeakyReLU(0.2),\n )\n self.up2 = nn.Sequential(\n ConvTranspose2dWNUB(8 * F, 4 * F, self.size // 8, self.size // 8, 4, 2, 1),\n nn.LeakyReLU(0.2),\n )\n self.up3 = nn.Sequential(\n ConvTranspose2dWNUB(4 * F, 2 * F, self.size // 4, self.size // 4, 4, 2, 1),\n nn.LeakyReLU(0.2),\n )\n self.up4 = nn.Sequential(\n ConvTranspose2dWNUB(2 * F, F, self.size // 2, self.size // 2, 4, 2, 1),\n nn.LeakyReLU(0.2),\n )\n self.up5 = nn.Sequential(\n ConvTranspose2dWNUB(F, F, self.size, self.size, 4, 2, 1), nn.LeakyReLU(0.2)\n )\n self.out = Conv2dWNUB(F + in_channels, out_channels, self.size, self.size, kernel_size=1)\n self.apply(lambda x: glorot(x, 0.2))\n glorot(self.out, 1.0)\n\n def forward(self, x):\n x1 = x\n x2 = self.down1(x1)\n x3 = self.down2(x2)\n x4 = self.down3(x3)\n x5 = self.down4(x4)\n x6 = self.down5(x5)\n # TODO: switch to concat?\n x = self.up1(x6) + x5\n x = self.up2(x) + x4\n x = self.up3(x) + x3\n x = self.up4(x) + x2\n x = self.up5(x)\n x = th.cat([x, x1], dim=1)\n return self.out(x) * self.out_scale" }, { "identifier": "CalV5", "path": "ca_body/nn/color_cal.py", "snippet": "class CalV5(CalBase):\n def __init__(\n self,\n # pyre-fixme[2]: Parameter must be annotated.\n cameras,\n # pyre-fixme[2]: Parameter must be annotated.\n identity_camera,\n gs_lrscale: float = 1e0,\n col_lrscale: float = 1e-1,\n ) -> None:\n super(CalBase, self).__init__()\n\n if identity_camera not in cameras:\n identity_camera = cameras[0]\n logger.warning(\n f\"Requested color-calibration identity camera not present, defaulting to {identity_camera}.\"\n )\n\n # pyre-fixme[4]: Attribute must be annotated.\n self.identity_camera = identity_camera\n # pyre-fixme[4]: Attribute must be annotated.\n self.cameras = cameras\n self.gs_lrscale = gs_lrscale\n self.col_lrscale = col_lrscale\n self.holder: ParamHolder = ParamHolder(\n # pyre-fixme[6]: For 1st param expected `Tuple[int]` but got `int`.\n 3 + 3,\n cameras,\n init_value=th.FloatTensor([1, 1, 1, 0, 0, 0]),\n )\n\n # pyre-fixme[4]: Attribute must be annotated.\n self.identity_idx = self.holder.to_idx([identity_camera]).item()\n # pyre-fixme[4]: Attribute must be annotated.\n self.grey_idxs = [self.holder.to_idx([c]).item() for c in cameras if c.startswith(\"41\")]\n\n s = th.FloatTensor([0.37, 0.52, 0.52])\n self.holder.params.data[th.LongTensor(self.grey_idxs), :3] = s\n\n def name_to_idx(self, cam_names: Sequence[str]) -> th.Tensor:\n return self.holder.to_idx(cam_names)\n\n # pyre-fixme[2]: Parameter must be 
annotated.\n def initialize_from_texs(self, ds) -> float:\n tex_mean = ds.tex_mean.permute(1, 2, 0)\n texs = {}\n idx = 0\n while ds[idx] is None:\n idx += 1\n\n for cam in self.cameras:\n samp = ds[idx, cam]\n if samp is None:\n continue\n\n tex = samp[\"tex\"]\n texs[cam] = tex.permute(1, 2, 0)\n\n stats = {}\n for cam in texs.keys():\n t = texs[cam]\n mask = (t > 0).all(dim=2)\n t = t * ds.tex_std + tex_mean\n stats[cam] = (t[mask].mean(dim=0), t[mask].std(dim=0))\n\n normstats = {}\n for cam in texs.keys():\n mean, std = stats[cam]\n imean, istd = stats[self.identity_camera]\n scale = istd / std\n bias = imean - scale * mean\n normstats[cam] = (scale.clamp(max=2), bias)\n\n for cam, nstats in normstats.items():\n cidx = self.name_to_idx([cam])[0]\n if cidx in self.grey_idxs:\n nstats = (nstats[0] / 3, nstats[1] / 3)\n self.holder.params.data[cidx, 0:3] = nstats[0]\n self.holder.params.data[cidx, 3:6] = nstats[1]\n return len(stats.keys()) / len(ds.cameras)\n\n # pyre-fixme[3]: Return type must be annotated.\n # pyre-fixme[2]: Parameter must be annotated.\n # pyre-fixme[14]: `load_state_dict` overrides method defined in `Module`\n # inconsistently.\n def load_state_dict(self, state_dict, strict: bool = True):\n state_dict = {k[7:]: v for k, v in state_dict.items() if k.startswith(\"holder.\")}\n return self.holder.load_state_dict(state_dict, strict=strict)\n\n # pyre-fixme[14]: `state_dict` overrides method defined in `Module` inconsistently.\n # pyre-fixme[3]: Return type must be annotated.\n def state_dict(\n self,\n # pyre-fixme[2]: Parameter must be annotated.\n destination=None,\n prefix: str = \"\",\n keep_vars: bool = False,\n saving: bool = False,\n ):\n sd = super(CalBase, self).state_dict(\n destination=destination, prefix=prefix, keep_vars=keep_vars\n )\n if saving:\n sd[prefix + \"holder.key_list\"] = self.holder.key_list\n return sd\n\n def forward(self, image: th.Tensor, cam_idxs: th.Tensor) -> th.Tensor:\n params = self.holder(cam_idxs)\n outs = []\n hook_scales = []\n for i in range(cam_idxs.shape[0]):\n idx = cam_idxs[i]\n img = image[i : i + 1]\n if idx == self.identity_idx:\n outs.append(img)\n hook_scales.append(1)\n continue\n\n w, b = params[i, :3], params[i, 3:]\n if idx in self.grey_idxs:\n b = b.sum()\n out = (img * w[None, :, None, None]).sum(dim=1, keepdim=True).expand(\n -1, 3, -1, -1\n ) + b\n else:\n out = img * w[None, :, None, None] + b[None, :, None, None]\n outs.append(out)\n hook_scales.append(self.gs_lrscale if idx in self.grey_idxs else self.col_lrscale)\n\n hook_scales = th.tensor(hook_scales, device=image.device, dtype=th.float32)\n cal_out = th.cat(outs)\n\n if self.training and params.requires_grad:\n params.register_hook(lambda g, hs=hook_scales: scale_hook(g, hs[:, None]))\n return cal_out" }, { "identifier": "linear2displayBatch", "path": "ca_body/utils/image.py", "snippet": "def linear2displayBatch(\n val: th.Tensor,\n gamma: float = 1.5,\n wbscale: np.ndarray = __DEFAULT_WB_SCALE,\n black: float = 5.0 / 255.0,\n mode: str = \"srgb\",\n) -> th.Tensor:\n scaling: th.Tensor = th.from_numpy(wbscale).to(val.device)\n val = val.float() / 255.0 * scaling[None, :, None, None] - black\n if mode == \"srgb\":\n val = linear2srgb(val, gamma=gamma)\n else:\n val = val ** th.tensor(1.0 / gamma)\n return th.clamp(val, 0, 1) * 255.0" }, { "identifier": "LBSModule", "path": "ca_body/utils/lbs.py", "snippet": "class LBSModule(nn.Module):\n def __init__(\n self, lbs_model_json, lbs_config_dict, lbs_template_verts, lbs_scale, global_scaling\n ):\n 
super().__init__()\n self.lbs_fn = LinearBlendSkinning(lbs_model_json, lbs_config_dict)\n\n self.register_buffer(\"lbs_scale\", th.as_tensor(lbs_scale, dtype=th.float32))\n self.register_buffer(\n \"lbs_template_verts\", th.as_tensor(lbs_template_verts, dtype=th.float32)\n )\n self.register_buffer(\"global_scaling\", th.as_tensor(global_scaling))\n\n def pose(self, verts_unposed, motion, template: Optional[th.Tensor] = None):\n scale = self.lbs_scale.expand(motion.shape[0], -1)\n if template is None:\n template = self.lbs_template_verts\n return self.lbs_fn(motion, scale, verts_unposed + template) * self.global_scaling\n\n def unpose(self, verts, motion):\n B = motion.shape[0]\n scale = self.lbs_scale.expand(B, -1)\n return (\n self.lbs_fn.unpose(motion, scale, verts / self.global_scaling) - self.lbs_template_verts\n )\n\n def template_pose(self, motion):\n B = motion.shape[0]\n scale = self.lbs_scale.expand(B, -1)\n verts = self.lbs_template_verts[np.newaxis].expand(B, -1, -1)\n return self.lbs_fn(motion, scale, verts) * self.global_scaling[np.newaxis]" }, { "identifier": "RenderLayer", "path": "ca_body/utils/render.py", "snippet": "class RenderLayer(nn.Module):\n \n def __init__(self, h, w, vi, vt, vti, flip_uvs=False):\n super().__init__()\n self.register_buffer(\"vi\", vi, persistent=False)\n self.register_buffer(\"vt\", vt, persistent=False)\n self.register_buffer(\"vti\", vti, persistent=False)\n raster_settings = RasterizationSettings(image_size=(h, w))\n self.rasterizer = MeshRasterizer(raster_settings=raster_settings)\n self.flip_uvs = flip_uvs \n image_size = th.as_tensor([h, w], dtype=th.int32)\n self.register_buffer(\"image_size\", image_size)\n \n def forward(self, verts: th.Tensor, tex: th.Tensor, K: th.Tensor, Rt: th.Tensor, background: th.Tensor = None, output_filters: List[str] = None):\n\n assert output_filters is None\n assert background is None\n\n B = verts.shape[0]\n\n image_size = self.image_size[None].repeat(B, 1)\n \n cameras = cameras_from_opencv_projection(Rt[:,:,:3], Rt[:,:3,3], K, image_size)\n\n faces = self.vi[None].repeat(B, 1, 1)\n faces_uvs = self.vti[None].repeat(B, 1, 1)\n verts_uvs = self.vt[None].repeat(B, 1, 1) \n \n # NOTE: this is confusing but correct\n if not self.flip_uvs:\n tex = th.flip(tex.permute(0, 2, 3, 1), (1,))\n\n textures = TexturesUV(\n maps=tex,\n faces_uvs=faces_uvs,\n verts_uvs=verts_uvs,\n ) \n meshes = Meshes(verts, faces, textures=textures)\n \n fragments = self.rasterizer(meshes, cameras=cameras)\n rgb = meshes.sample_textures(fragments)[:,:,:,0] \n rgb[fragments.pix_to_face[...,0] == -1] = 0.0 \n\n return {'render': rgb.permute(0, 3, 1, 2)}" }, { "identifier": "SeamSampler", "path": "ca_body/utils/seams.py", "snippet": "class SeamSampler(nn.Module):\n def __init__(self, seamless_data: Dict[str, Any]) -> None:\n super().__init__()\n\n self.register_buffer(\"dst_ij\", seamless_data[\"dst_ij\"])\n self.register_buffer(\"src_ij\", seamless_data[\"src_ij\"])\n self.register_buffer(\"uvs\", seamless_data[\"uvs\"])\n self.register_buffer(\"weights\", seamless_data[\"weights\"])\n\n def impaint(self, value: th.Tensor) -> th.Tensor:\n return impaint_batch(value, self.dst_ij, self.src_ij)\n\n def resample(self, tex: th.Tensor) -> th.Tensor:\n return resample_tex(tex, self.uvs, self.weights)\n\n def resample_border_only(self, tex: th.Tensor) -> th.Tensor:\n tex = resample_tex(tex, self.uvs, self.weights)\n return tex\n\n def forward(self, tex: th.Tensor) -> th.Tensor:\n x = self.impaint(tex)\n x = self.resample(x)\n return x" }, { 
"identifier": "RenderLayer", "path": "ca_body/utils/render.py", "snippet": "class RenderLayer(nn.Module):\n \n def __init__(self, h, w, vi, vt, vti, flip_uvs=False):\n super().__init__()\n self.register_buffer(\"vi\", vi, persistent=False)\n self.register_buffer(\"vt\", vt, persistent=False)\n self.register_buffer(\"vti\", vti, persistent=False)\n raster_settings = RasterizationSettings(image_size=(h, w))\n self.rasterizer = MeshRasterizer(raster_settings=raster_settings)\n self.flip_uvs = flip_uvs \n image_size = th.as_tensor([h, w], dtype=th.int32)\n self.register_buffer(\"image_size\", image_size)\n \n def forward(self, verts: th.Tensor, tex: th.Tensor, K: th.Tensor, Rt: th.Tensor, background: th.Tensor = None, output_filters: List[str] = None):\n\n assert output_filters is None\n assert background is None\n\n B = verts.shape[0]\n\n image_size = self.image_size[None].repeat(B, 1)\n \n cameras = cameras_from_opencv_projection(Rt[:,:,:3], Rt[:,:3,3], K, image_size)\n\n faces = self.vi[None].repeat(B, 1, 1)\n faces_uvs = self.vti[None].repeat(B, 1, 1)\n verts_uvs = self.vt[None].repeat(B, 1, 1) \n \n # NOTE: this is confusing but correct\n if not self.flip_uvs:\n tex = th.flip(tex.permute(0, 2, 3, 1), (1,))\n\n textures = TexturesUV(\n maps=tex,\n faces_uvs=faces_uvs,\n verts_uvs=verts_uvs,\n ) \n meshes = Meshes(verts, faces, textures=textures)\n \n fragments = self.rasterizer(meshes, cameras=cameras)\n rgb = meshes.sample_textures(fragments)[:,:,:,0] \n rgb[fragments.pix_to_face[...,0] == -1] = 0.0 \n\n return {'render': rgb.permute(0, 3, 1, 2)}" }, { "identifier": "FaceDecoderFrontal", "path": "ca_body/nn/face.py", "snippet": "class FaceDecoderFrontal(nn.Module):\n def __init__(\n self,\n assets: AttrDict,\n n_latent: int = 256,\n n_vert_out: int = 3 * 7306,\n tex_out_shp: Tuple[int, int] = (1024, 1024),\n tex_roi: Tuple[Tuple[int, int], Tuple[int, int]] = ((0, 0), (1024, 1024)),\n ) -> None:\n super().__init__()\n self.n_latent = n_latent\n self.n_vert_out = n_vert_out\n self.tex_roi = tex_roi\n self.tex_roi_shp: Tuple[int, int] = tuple(\n [int(i) for i in np.diff(np.array(tex_roi), axis=0).squeeze()]\n )\n self.tex_out_shp = tex_out_shp\n\n self.encmod = nn.Sequential(la.LinearWN(n_latent, 256), nn.LeakyReLU(0.2, inplace=True))\n self.geommod = nn.Sequential(la.LinearWN(256, n_vert_out))\n\n self.viewmod = nn.Sequential(la.LinearWN(3, 8), nn.LeakyReLU(0.2, inplace=True))\n self.texmod2 = nn.Sequential(\n la.LinearWN(256 + 8, 256 * 4 * 4), nn.LeakyReLU(0.2, inplace=True)\n )\n self.texmod = nn.Sequential(\n la.ConvTranspose2dWNUB(256, 256, 8, 8, 4, 2, 1),\n nn.LeakyReLU(0.2, inplace=True),\n la.ConvTranspose2dWNUB(256, 128, 16, 16, 4, 2, 1),\n nn.LeakyReLU(0.2, inplace=True),\n la.ConvTranspose2dWNUB(128, 128, 32, 32, 4, 2, 1),\n nn.LeakyReLU(0.2, inplace=True),\n la.ConvTranspose2dWNUB(128, 64, 64, 64, 4, 2, 1),\n nn.LeakyReLU(0.2, inplace=True),\n la.ConvTranspose2dWNUB(64, 64, 128, 128, 4, 2, 1),\n nn.LeakyReLU(0.2, inplace=True),\n la.ConvTranspose2dWNUB(64, 32, 256, 256, 4, 2, 1),\n nn.LeakyReLU(0.2, inplace=True),\n la.ConvTranspose2dWNUB(32, 8, 512, 512, 4, 2, 1),\n nn.LeakyReLU(0.2, inplace=True),\n la.ConvTranspose2dWNUB(8, 3, 1024, 1024, 4, 2, 1),\n )\n\n self.bias = nn.Parameter(th.zeros(3, self.tex_roi_shp[0], self.tex_roi_shp[1]))\n self.bias.data.zero_()\n\n self.register_buffer(\n \"frontal_view\", th.as_tensor(assets.face_frontal_view, dtype=th.float32)\n )\n\n self.apply(lambda x: la.glorot(x, 0.2))\n la.glorot(self.texmod[-1], 1.0)\n\n def forward(self, face_embs: 
th.Tensor) -> Dict[str, th.Tensor]:\n B = face_embs.shape[0]\n view = self.frontal_view[np.newaxis].expand(B, -1)\n encout = self.encmod(face_embs)\n geomout = self.geommod(encout)\n viewout = self.viewmod(view)\n encview = th.cat([encout, viewout], dim=1)\n texout = self.texmod(self.texmod2(encview).view(-1, 256, 4, 4))\n out = {\"face_geom\": geomout.view(geomout.shape[0], -1, 3)}\n out[\"face_tex_raw\"] = texout\n texout = texout + self.bias[None]\n out[\"face_tex\"] = 255 * (texout + 0.5)\n return out" } ]
import logging import numpy as np import torch as th import torch.nn as nn import torch.nn.functional as F import ca_body.nn.layers as la from typing import Dict, Optional, Tuple from torchvision.utils import make_grid from torchvision.transforms.functional import gaussian_blur from ca_body.nn.blocks import ( ConvBlock, ConvDownBlock, UpConvBlockDeep, tile2d, weights_initializer, ) from ca_body.nn.dof_cal import LearnableBlur from ca_body.utils.geom import ( GeometryModule, compute_view_cos, depth_discontuity_mask, depth2normals, ) from ca_body.nn.shadow import ShadowUNet, PoseToShadow from ca_body.nn.unet import UNetWB from ca_body.nn.color_cal import CalV5 from ca_body.utils.image import linear2displayBatch from ca_body.utils.lbs import LBSModule from ca_body.utils.render import RenderLayer from ca_body.utils.seams import SeamSampler from ca_body.utils.render import RenderLayer from ca_body.nn.face import FaceDecoderFrontal
11,387
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. logger = logging.getLogger(__name__) class CameraPixelBias(nn.Module): def __init__(self, image_height, image_width, cameras, ds_rate) -> None: super().__init__() self.image_height = image_height self.image_width = image_width self.cameras = cameras self.n_cameras = len(cameras) bias = th.zeros( (self.n_cameras, 1, image_width // ds_rate, image_height // ds_rate), dtype=th.float32 ) self.register_parameter("bias", nn.Parameter(bias)) def forward(self, idxs: th.Tensor): bias_up = F.interpolate( self.bias[idxs], size=(self.image_height, self.image_width), mode='bilinear' ) return bias_up class AutoEncoder(nn.Module): def __init__( self, encoder, decoder, decoder_view, encoder_face, # hqlp decoder to get the codes decoder_face, shadow_net, upscale_net, assets, pose_to_shadow=None, renderer=None, cal=None, pixel_cal=None, learn_blur: bool = True, ): super().__init__() # TODO: should we have a shared LBS here? self.geo_fn = GeometryModule( assets.topology.vi, assets.topology.vt, assets.topology.vti, assets.topology.v2uv, uv_size=1024, impaint=True, ) self.lbs_fn = LBSModule( assets.lbs_model_json, assets.lbs_config_dict, assets.lbs_template_verts, assets.lbs_scale, assets.global_scaling, ) self.seam_sampler = SeamSampler(assets.seam_data_1024) self.seam_sampler_2k = SeamSampler(assets.seam_data_2048) # joint tex -> body and clothes # TODO: why do we have a joint one in the first place? tex_mean = gaussian_blur(th.as_tensor(assets.tex_mean)[np.newaxis], kernel_size=11) self.register_buffer("tex_mean", F.interpolate(tex_mean, (2048, 2048), mode='bilinear')) # this is shared self.tex_std = assets.tex_var if 'tex_var' in assets else 64.0 face_cond_mask = th.as_tensor(assets.face_cond_mask, dtype=th.float32)[ np.newaxis, np.newaxis ] self.register_buffer("face_cond_mask", face_cond_mask) meye_mask = self.geo_fn.to_uv( th.as_tensor(assets.mouth_eyes_mask_geom[np.newaxis, :, np.newaxis]) ) meye_mask = F.interpolate(meye_mask, (2048, 2048), mode='bilinear') self.register_buffer("meye_mask", meye_mask) self.decoder = ConvDecoder( geo_fn=self.geo_fn, seam_sampler=self.seam_sampler, **decoder, assets=assets, ) # embs for everything but face non_head_mask = 1.0 - assets.face_mask self.encoder = Encoder( geo_fn=self.geo_fn, mask=non_head_mask, **encoder, ) self.encoder_face = FaceEncoder( assets=assets, **encoder_face, ) # using face decoder to generate better conditioning decoder_face_ckpt_path = None if 'ckpt' in decoder_face: decoder_face_ckpt_path = decoder_face.pop('ckpt')
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. logger = logging.getLogger(__name__) class CameraPixelBias(nn.Module): def __init__(self, image_height, image_width, cameras, ds_rate) -> None: super().__init__() self.image_height = image_height self.image_width = image_width self.cameras = cameras self.n_cameras = len(cameras) bias = th.zeros( (self.n_cameras, 1, image_width // ds_rate, image_height // ds_rate), dtype=th.float32 ) self.register_parameter("bias", nn.Parameter(bias)) def forward(self, idxs: th.Tensor): bias_up = F.interpolate( self.bias[idxs], size=(self.image_height, self.image_width), mode='bilinear' ) return bias_up class AutoEncoder(nn.Module): def __init__( self, encoder, decoder, decoder_view, encoder_face, # hqlp decoder to get the codes decoder_face, shadow_net, upscale_net, assets, pose_to_shadow=None, renderer=None, cal=None, pixel_cal=None, learn_blur: bool = True, ): super().__init__() # TODO: should we have a shared LBS here? self.geo_fn = GeometryModule( assets.topology.vi, assets.topology.vt, assets.topology.vti, assets.topology.v2uv, uv_size=1024, impaint=True, ) self.lbs_fn = LBSModule( assets.lbs_model_json, assets.lbs_config_dict, assets.lbs_template_verts, assets.lbs_scale, assets.global_scaling, ) self.seam_sampler = SeamSampler(assets.seam_data_1024) self.seam_sampler_2k = SeamSampler(assets.seam_data_2048) # joint tex -> body and clothes # TODO: why do we have a joint one in the first place? tex_mean = gaussian_blur(th.as_tensor(assets.tex_mean)[np.newaxis], kernel_size=11) self.register_buffer("tex_mean", F.interpolate(tex_mean, (2048, 2048), mode='bilinear')) # this is shared self.tex_std = assets.tex_var if 'tex_var' in assets else 64.0 face_cond_mask = th.as_tensor(assets.face_cond_mask, dtype=th.float32)[ np.newaxis, np.newaxis ] self.register_buffer("face_cond_mask", face_cond_mask) meye_mask = self.geo_fn.to_uv( th.as_tensor(assets.mouth_eyes_mask_geom[np.newaxis, :, np.newaxis]) ) meye_mask = F.interpolate(meye_mask, (2048, 2048), mode='bilinear') self.register_buffer("meye_mask", meye_mask) self.decoder = ConvDecoder( geo_fn=self.geo_fn, seam_sampler=self.seam_sampler, **decoder, assets=assets, ) # embs for everything but face non_head_mask = 1.0 - assets.face_mask self.encoder = Encoder( geo_fn=self.geo_fn, mask=non_head_mask, **encoder, ) self.encoder_face = FaceEncoder( assets=assets, **encoder_face, ) # using face decoder to generate better conditioning decoder_face_ckpt_path = None if 'ckpt' in decoder_face: decoder_face_ckpt_path = decoder_face.pop('ckpt')
self.decoder_face = FaceDecoderFrontal(assets=assets, **decoder_face)
19
2023-12-27 15:31:35+00:00
16k
open-mmlab/Amphion
models/tts/valle/valle_inference.py
[ { "identifier": "G2PModule", "path": "text/g2p_module.py", "snippet": "class G2PModule:\n \"\"\"Phonemize Text.\"\"\"\n\n def __init__(\n self,\n language=\"en-us\",\n backend=\"espeak\",\n separator=Separator(word=\"_\", syllable=\"-\", phone=\"|\"),\n preserve_punctuation=True,\n punctuation_marks: Union[str, Pattern] = Punctuation.default_marks(),\n with_stress: bool = False,\n tie: Union[bool, str] = False,\n language_switch: LanguageSwitch = \"keep-flags\",\n words_mismatch: WordMismatch = \"ignore\",\n ) -> None:\n self.backend = self._initialize_backend(\n backend,\n language,\n punctuation_marks,\n preserve_punctuation,\n with_stress,\n tie,\n language_switch,\n words_mismatch,\n )\n self.separator = separator\n\n def _initialize_backend(\n self,\n backend,\n language,\n punctuation_marks,\n preserve_punctuation,\n with_stress,\n tie,\n language_switch,\n words_mismatch,\n ):\n if backend == \"espeak\":\n return EspeakBackend(\n language,\n punctuation_marks=punctuation_marks,\n preserve_punctuation=preserve_punctuation,\n with_stress=with_stress,\n tie=tie,\n language_switch=language_switch,\n words_mismatch=words_mismatch,\n )\n elif backend in [\"pypinyin\", \"pypinyin_initials_finals\"]:\n return PypinyinBackend(\n backend=backend,\n punctuation_marks=punctuation_marks + self.separator.word,\n )\n else:\n raise NotImplementedError(f\"{backend}\")\n\n def to_list(self, phonemized: str) -> List[str]:\n fields = []\n for word in phonemized.split(self.separator.word):\n pp = re.findall(r\"\\w+|[^\\w\\s]\", word, re.UNICODE)\n fields.extend(\n [p for p in pp if p != self.separator.phone] + [self.separator.word]\n )\n assert len(\"\".join(fields[:-1])) == len(phonemized) - phonemized.count(\n self.separator.phone\n )\n return fields[:-1]\n\n def phonemization(self, text, strip=True) -> List[List[str]]:\n if isinstance(text, str):\n text = [text]\n\n phonemized = self.backend.phonemize(\n text, separator=self.separator, strip=strip, njobs=1\n )\n phonemes = [self.to_list(p) for p in phonemized]\n return phonemes\n\n def g2p_conversion(self, text: str) -> List[str]:\n phonemes = self.phonemization([text.strip()])\n return phonemes[0]" }, { "identifier": "AudioTokenizer", "path": "utils/tokenizer.py", "snippet": "class AudioTokenizer:\n \"\"\"EnCodec audio tokenizer for encoding and decoding audio.\n\n Attributes:\n device: The device on which the codec model is loaded.\n codec: The pretrained EnCodec model.\n sample_rate: Sample rate of the model.\n channels: Number of audio channels in the model.\n \"\"\"\n\n def __init__(self, device: Any = None) -> None:\n model = EncodecModel.encodec_model_24khz()\n model.set_target_bandwidth(6.0)\n remove_encodec_weight_norm(model)\n\n if not device:\n device = torch.device(\"cpu\")\n if torch.cuda.is_available():\n device = torch.device(\"cuda:0\")\n\n self._device = device\n\n self.codec = model.to(device)\n self.sample_rate = model.sample_rate\n self.channels = model.channels\n\n @property\n def device(self):\n return self._device\n\n def encode(self, wav: torch.Tensor) -> torch.Tensor:\n \"\"\"Encode the audio waveform.\n\n Args:\n wav: A tensor representing the audio waveform.\n\n Returns:\n A tensor representing the encoded audio.\n \"\"\"\n return self.codec.encode(wav.to(self.device))\n\n def decode(self, frames: torch.Tensor) -> torch.Tensor:\n \"\"\"Decode the encoded audio frames.\n\n Args:\n frames: A tensor representing the encoded audio frames.\n\n Returns:\n A tensor representing the decoded audio waveform.\n \"\"\"\n return 
self.codec.decode(frames)" }, { "identifier": "tokenize_audio", "path": "utils/tokenizer.py", "snippet": "def tokenize_audio(tokenizer: AudioTokenizer, audio_path: str):\n \"\"\"\n Tokenize the audio waveform using the given AudioTokenizer.\n\n Args:\n tokenizer: An instance of AudioTokenizer.\n audio_path: Path to the audio file.\n\n Returns:\n A tensor of encoded frames from the audio.\n\n Raises:\n FileNotFoundError: If the audio file is not found.\n RuntimeError: If there's an error processing the audio data.\n \"\"\"\n # try:\n # Load and preprocess the audio waveform\n wav, sr = torchaudio.load(audio_path)\n wav = convert_audio(wav, sr, tokenizer.sample_rate, tokenizer.channels)\n wav = wav.unsqueeze(0)\n\n # Extract discrete codes from EnCodec\n with torch.no_grad():\n encoded_frames = tokenizer.encode(wav)\n return encoded_frames\n\n # except FileNotFoundError:\n # raise FileNotFoundError(f\"Audio file not found at {audio_path}\")\n # except Exception as e:\n # raise RuntimeError(f\"Error processing audio data: {e}\")" }, { "identifier": "VALLE", "path": "models/tts/valle/valle.py", "snippet": "class VALLE(nn.Module):\n def __init__(\n self,\n cfg,\n decoder_cls=TransformerEncoder,\n decoder_layer_cls=TransformerEncoderLayer,\n ):\n super().__init__()\n decoder_dim = cfg.decoder_dim\n nhead = cfg.nhead\n nar_scale_factor = cfg.nar_scale_factor\n num_quantizers = cfg.num_quantizers\n num_decoder_layers = cfg.num_decoder_layers\n nar_decoder_dim = int(decoder_dim * nar_scale_factor)\n\n self.ar_text_embedding = TokenEmbedding(decoder_dim, cfg.text_token_num)\n self.nar_text_embedding = TokenEmbedding(nar_decoder_dim, cfg.text_token_num)\n\n self.ar_audio_prepend_bos = cfg.prepend_bos\n self.ar_audio_embedding = TokenEmbedding(\n decoder_dim, cfg.audio_token_num + 1 + int(cfg.prepend_bos)\n )\n self.audio_token_num = cfg.audio_token_num\n\n # PreNet of AR\n if cfg.add_prenet:\n self.ar_text_prenet = nn.Sequential(\n Transpose(),\n nn.Conv1d(decoder_dim, decoder_dim, kernel_size=5, padding=\"same\"),\n nn.BatchNorm1d(decoder_dim),\n nn.ReLU(),\n nn.Dropout(0.5),\n nn.Conv1d(decoder_dim, decoder_dim, kernel_size=5, padding=\"same\"),\n nn.BatchNorm1d(decoder_dim),\n nn.ReLU(),\n nn.Dropout(0.5),\n nn.Conv1d(decoder_dim, decoder_dim, kernel_size=5, padding=\"same\"),\n nn.BatchNorm1d(decoder_dim),\n nn.ReLU(),\n nn.Dropout(0.5),\n Transpose(),\n nn.Linear(decoder_dim, decoder_dim),\n )\n\n self.ar_audio_prenet = nn.Sequential(\n nn.Linear(decoder_dim, 256),\n nn.ReLU(),\n nn.Dropout(0.25),\n nn.Linear(256, 256),\n nn.ReLU(),\n nn.Dropout(0.25),\n nn.Linear(256, decoder_dim),\n )\n else:\n self.ar_text_prenet = nn.Identity()\n self.ar_audio_prenet = nn.Identity()\n\n self.ar_text_position = SinePositionalEmbedding(\n decoder_dim,\n dropout=0.1,\n scale=False,\n alpha=True,\n )\n self.ar_audio_position = SinePositionalEmbedding(\n decoder_dim,\n dropout=0.1,\n scale=False,\n alpha=True,\n )\n\n self.ar_decoder = decoder_cls(\n decoder_layer_cls(\n decoder_dim,\n nhead,\n dim_feedforward=decoder_dim * 4, # *4?\n dropout=0.1,\n batch_first=True,\n norm_first=cfg.norm_first,\n ),\n num_layers=num_decoder_layers,\n norm=LayerNorm(decoder_dim) if cfg.norm_first else None,\n )\n self.ar_predict_layer = nn.Linear(\n decoder_dim, cfg.audio_token_num + 1, bias=False\n )\n\n self.ar_accuracy_metric = MulticlassAccuracy(\n cfg.audio_token_num + 1,\n top_k=10,\n average=\"micro\",\n multidim_average=\"global\",\n ignore_index=cfg.audio_token_num,\n )\n\n self.rng = random.Random(0)\n self.num_heads = 
nhead\n self.prefix_mode = cfg.prefix_mode\n self.num_quantizers = num_quantizers\n\n assert num_quantizers >= 1\n if num_quantizers > 1:\n self.nar_audio_embeddings = nn.ModuleList(\n [\n TokenEmbedding(nar_decoder_dim, cfg.audio_token_num + 1)\n ] # Why the first layer is audio_token_num + 1?\n + [\n TokenEmbedding(nar_decoder_dim, cfg.audio_token_num)\n for i in range(num_quantizers - 1)\n ]\n )\n\n if cfg.add_prenet:\n self.nar_text_prenet = nn.Sequential(\n Transpose(),\n nn.Conv1d(\n nar_decoder_dim, nar_decoder_dim, kernel_size=5, padding=\"same\"\n ),\n nn.BatchNorm1d(nar_decoder_dim),\n nn.ReLU(),\n nn.Dropout(0.5),\n nn.Conv1d(\n nar_decoder_dim, nar_decoder_dim, kernel_size=5, padding=\"same\"\n ),\n nn.BatchNorm1d(nar_decoder_dim),\n nn.ReLU(),\n nn.Dropout(0.5),\n nn.Conv1d(\n nar_decoder_dim, nar_decoder_dim, kernel_size=5, padding=\"same\"\n ),\n nn.BatchNorm1d(nar_decoder_dim),\n nn.ReLU(),\n nn.Dropout(0.5),\n Transpose(),\n nn.Linear(nar_decoder_dim, nar_decoder_dim),\n )\n self.nar_audio_prenet = nn.Sequential(\n nn.Linear(nar_decoder_dim, 256),\n nn.ReLU(),\n nn.Dropout(0.25),\n nn.Linear(256, 256),\n nn.ReLU(),\n nn.Dropout(0.25),\n nn.Linear(256, nar_decoder_dim),\n )\n else:\n self.nar_text_prenet = nn.Identity()\n self.nar_audio_prenet = nn.Identity()\n\n self.nar_text_position = SinePositionalEmbedding(\n nar_decoder_dim,\n dropout=0.0,\n scale=False,\n alpha=False,\n )\n self.nar_audio_position = SinePositionalEmbedding(\n nar_decoder_dim,\n dropout=0.1,\n scale=False,\n alpha=False,\n )\n\n self.nar_decoder = decoder_cls(\n decoder_layer_cls(\n nar_decoder_dim,\n int(nhead * nar_scale_factor),\n dim_feedforward=nar_decoder_dim * 4,\n dropout=0.1,\n batch_first=True,\n norm_first=cfg.norm_first,\n adaptive_layer_norm=True,\n ),\n num_layers=int(num_decoder_layers * nar_scale_factor),\n norm=AdaptiveLayerNorm(\n nar_decoder_dim, norm=nn.LayerNorm(nar_decoder_dim)\n )\n if cfg.norm_first\n else None,\n )\n self.nar_predict_layers = nn.ModuleList(\n [\n nn.Linear(nar_decoder_dim, cfg.audio_token_num, bias=False)\n for i in range(num_quantizers - 1)\n ]\n )\n self.nar_stage_embeddings = nn.ModuleList(\n [TokenEmbedding(nar_decoder_dim, 1) for i in range(num_quantizers - 1)]\n )\n\n if cfg.share_embedding:\n for j in range(0, num_quantizers - 2):\n self.nar_predict_layers[j].weight = self.nar_audio_embeddings[\n j + 2\n ].weight\n\n self.nar_accuracy_metric = MulticlassAccuracy(\n cfg.audio_token_num + 1,\n top_k=10,\n average=\"micro\",\n multidim_average=\"global\",\n ignore_index=cfg.audio_token_num,\n )\n\n def forward(\n self,\n x: torch.Tensor,\n x_lens: torch.Tensor,\n y: Union[torch.Tensor, PromptedFeatures],\n y_lens: Union[torch.Tensor, PromptedFeatures],\n reduction: str = \"sum\",\n train_stage: int = 0,\n **kwargs,\n ) -> Tuple[torch.Tensor, Union[torch.Tensor, None]]:\n \"\"\"\n Args:\n x:\n A 2-D tensor of shape (N, S).\n x_lens:\n A 1-D tensor of shape (N,). It contains the number of tokens in `x`\n before padding.\n y:\n A 3-D tensor of shape (N, T, 8).\n y_lens:\n A 1-D tensor of shape (N,). 
It contains the number of tokens in `x`\n before padding.\n train_stage:\n 0: AR & NAR modules, 1: AR modules, 2: NAR modules\n Returns:\n Return the predicted audio code matrix, cross-entropy loss and Top-10 accuracy.\n \"\"\"\n assert x.ndim == 2, x.shape\n assert x_lens.ndim == 1, x_lens.shape\n\n y_prompts_codes = None\n if isinstance(y, PromptedFeatures):\n y_prompts_codes, y = y.data\n prompts_len, y_lens = y_lens.data\n assert prompts_len.min() == prompts_len.max()\n assert self.prefix_mode == 4\n y_prompts_codes = y_prompts_codes.type(torch.int64)\n\n assert y.ndim == 3, y.shape\n assert y_lens.ndim == 1, y_lens.shape\n\n x_mask = make_pad_mask(x_lens).to(x.device)\n y_mask = make_pad_mask(y_lens).to(y.device)\n y_mask_int = y_mask.type(torch.int64)\n\n text = x\n codes = y.type(torch.int64) * (1 - y_mask_int.unsqueeze(dim=-1))\n\n y, targets = self.pad_y_eos(\n codes[..., 0], y_mask_int, eos_id=self.audio_token_num\n )\n self.y_mask_int = y_mask_int\n\n metrics = {}\n total_loss = 0.0\n\n xy_padding_mask = torch.concat([x_mask, y_mask], dim=1)\n if self.ar_audio_prepend_bos:\n ar_xy_padding_mask = torch.concat(\n [x_mask, F.pad(y_mask, (1, 0), value=False)], dim=1\n )\n else:\n ar_xy_padding_mask = xy_padding_mask\n self.xy_padding_mask = xy_padding_mask\n self.ar_xy_padding_mask = ar_xy_padding_mask\n\n # AR Decoder\n if train_stage in [0, 1]:\n ar_loss, ar_metrics = self._forward_ar_decoder(\n text, x_lens.max(), y, y_lens.max(), targets, x_mask, y_mask, reduction\n )\n total_loss += ar_loss\n metrics[\"AR_Top100Acc\"] = ar_metrics\n\n # NAR Decoder\n if self.ar_audio_prepend_bos:\n y = y[:, 1:]\n\n if self.num_quantizers > 1 and train_stage in [0, 2]:\n nar_loss, nar_metrics = self._forward_nar_decoder(\n text,\n x_lens,\n y,\n y_lens,\n codes,\n y_prompts_codes,\n x_mask,\n y_mask,\n reduction,\n )\n total_loss += nar_loss\n metrics[\"NAR_Top100Acc\"] = nar_metrics\n\n if train_stage == 0:\n total_loss = total_loss / 2.0\n\n return total_loss, metrics\n\n def _forward_ar_decoder(\n self, x, x_len, y, y_lens, targets, x_mask, y_mask, reduction\n ):\n x = self.ar_text_embedding(x)\n x = self.ar_text_prenet(x)\n x = self.ar_text_position(x)\n\n y_len = y_lens.max() + int(self.ar_audio_prepend_bos)\n\n x_attn_mask = F.pad(\n torch.zeros((x_len, x_len), dtype=torch.bool, device=x.device),\n (0, y_len),\n value=True,\n )\n y_attn_mask = F.pad(\n torch.triu(\n torch.ones(y_len, y_len, dtype=torch.bool, device=x.device),\n diagonal=1,\n ),\n (x_len, 0),\n value=False,\n )\n xy_attn_mask = torch.concat([x_attn_mask, y_attn_mask], dim=0)\n\n bsz, src_len = x.shape[0], x_len + y_len\n _xy_padding_mask = (\n self.ar_xy_padding_mask.view(bsz, 1, 1, src_len)\n .expand(-1, self.num_heads, -1, -1)\n .reshape(bsz * self.num_heads, 1, src_len)\n )\n xy_attn_mask = xy_attn_mask.logical_or(_xy_padding_mask)\n\n new_attn_mask = torch.zeros_like(xy_attn_mask, dtype=x.dtype)\n new_attn_mask.masked_fill_(xy_attn_mask, float(\"-inf\"))\n xy_attn_mask = new_attn_mask\n\n y_emb = self.ar_audio_embedding(y)\n y_emb = self.ar_audio_prenet(y_emb)\n y_pos = self.ar_audio_position(y_emb)\n\n xy_pos = torch.concat([x, y_pos], dim=1)\n\n xy_dec, _ = self.ar_decoder(\n (xy_pos, None),\n mask=xy_attn_mask,\n )\n logits = self.ar_predict_layer(xy_dec[:, x_len:]).permute(0, 2, 1)\n ar_loss = F.cross_entropy(logits, targets, reduction=reduction)\n\n ar_metrics = self.ar_accuracy_metric(\n logits.detach(), targets\n ).item() * y_lens.sum().type(torch.float32)\n\n return ar_loss, ar_metrics\n\n def 
_forward_nar_decoder(\n self, x, x_lens, y, y_lens, codes, y_prompts_codes, x_mask, y_mask, reduction\n ):\n num_nar_layers = self.num_quantizers - 1\n nar_stage = self.rng.choices(\n [_k for _k in range(1, self.num_quantizers)],\n weights=[1.0 / num_nar_layers] * num_nar_layers,\n k=1,\n )[0]\n\n x = self.nar_text_embedding(x)\n x = self.nar_text_prenet(x)\n x = self.nar_text_position(x)\n\n y_emb, prefix_len = self._prepare_prompts(\n y, y_lens, codes, nar_stage, y_prompts_codes\n )\n\n y_len = y_lens.max()\n targets = codes[..., nar_stage] + self.audio_token_num * self.y_mask_int\n if self.prefix_mode in [2, 4]:\n xy_padding_mask = torch.concat(\n [\n x_mask,\n F.pad(y_mask, (y_emb.shape[1] - y_len, 0), value=False),\n ],\n dim=1,\n )\n elif self.prefix_mode == 1:\n targets = targets[:, prefix_len:]\n\n y_pos = self.nar_audio_prenet(y_emb)\n y_pos = self.nar_audio_position(y_pos)\n xy_pos = torch.concat([x, y_pos], dim=1)\n xy_dec, _ = self.nar_decoder(\n (xy_pos, self.nar_stage_embeddings[nar_stage - 1].weight),\n src_key_padding_mask=self.xy_padding_mask,\n )\n xy_dec = xy_dec[:, x_lens.max() + prefix_len :]\n if self.prefix_mode == 4:\n prefix_len = 0\n logits = self.nar_predict_layers[nar_stage - 1](xy_dec).permute(0, 2, 1)\n\n total_length = (y_lens).sum().type(torch.float32)\n nar_loss = F.cross_entropy(\n logits,\n targets,\n ignore_index=self.audio_token_num,\n reduction=reduction,\n ) * (total_length / (total_length - prefix_len * x.shape[0]))\n nar_metrics = (\n self.nar_accuracy_metric(\n F.pad(\n logits.detach(),\n (0, 0, 0, 1, 0, 0),\n value=logits.min().cpu().item(),\n ),\n targets,\n ).item()\n * total_length\n )\n return nar_loss, nar_metrics\n\n def inference(\n self,\n x: torch.Tensor,\n x_lens: torch.Tensor,\n y: torch.Tensor,\n enroll_x_lens: torch.Tensor,\n top_k: int = -100,\n temperature: float = 1.0,\n ) -> torch.Tensor:\n \"\"\"\n Args:\n x:\n A 2-D tensor of shape (1, S).\n x_lens:\n A 1-D tensor of shape (1,). It contains the number of tokens in `x`\n before padding.\n y:\n A 3-D tensor of shape (1, T, 8).\n top_k: (`optional`) int\n The number of highest probability tokens to keep for top-k-filtering. Default to -100.\n temperature: (`optional`) float\n The value used to module the next token probabilities. Must be strictly positive. 
Default to 1.0.\n Returns:\n Return the predicted audio code matrix.\n \"\"\"\n assert x.ndim == 2, x.shape\n assert x_lens.ndim == 1, x_lens.shape\n assert y.ndim == 3, y.shape\n assert y.shape[0] == 1, y.shape\n\n assert torch.all(x_lens > 0)\n\n text = x\n x = self.ar_text_embedding(text)\n x = self.ar_text_prenet(x)\n x = self.ar_text_position(x)\n\n text_len = x_lens.max()\n prompts = y\n prefix_len = y.shape[1]\n\n # AR Decoder\n y = prompts[..., 0]\n if self.ar_audio_prepend_bos:\n y = F.pad(y, (1, 0), value=self.audio_token_num + 1)\n\n x_len = x_lens.max()\n x_attn_mask = torch.zeros((x_len, x_len), dtype=torch.bool)\n\n while True:\n y_emb = self.ar_audio_embedding(y)\n y_emb = self.ar_audio_prenet(y_emb)\n y_pos = self.ar_audio_position(y_emb)\n xy_pos = torch.concat([x, y_pos], dim=1)\n\n y_len = y.shape[1]\n x_attn_mask_pad = F.pad(\n x_attn_mask,\n (0, y_len),\n value=True,\n )\n y_attn_mask = F.pad(\n torch.triu(torch.ones(y_len, y_len, dtype=torch.bool), diagonal=1),\n (x_len, 0),\n value=False,\n )\n xy_attn_mask = torch.concat([x_attn_mask_pad, y_attn_mask], dim=0).to(\n y.device\n )\n\n xy_dec, _ = self.ar_decoder(\n (xy_pos, None),\n mask=xy_attn_mask,\n )\n logits = self.ar_predict_layer(xy_dec[:, -1])\n samples = topk_sampling(\n logits, top_k=top_k, top_p=1.0, temperature=temperature\n )\n\n if (\n torch.argmax(logits, dim=-1)[0] == self.audio_token_num\n or samples[0, 0] == self.audio_token_num\n or (y.shape[1] - prompts.shape[1]) > x_lens.max() * 16\n ):\n if prompts.shape[1] == y.shape[1]:\n raise SyntaxError(\"well trained model shouldn't reach here.\")\n\n break\n\n y = torch.concat([y, samples], dim=1)\n\n codes = [y[:, prefix_len + int(self.ar_audio_prepend_bos) :]]\n if self.num_quantizers == 1:\n return torch.stack(codes, dim=-1)\n\n # Non-AR Decoders\n y_emb = self.nar_audio_embeddings[0](y[:, int(self.ar_audio_prepend_bos) :])\n\n if self.prefix_mode in [2, 4]:\n enrolled_len = enroll_x_lens.max().item()\n # SOS + Synthesis Text + EOS\n text = torch.concat(\n [\n text[:, :1],\n text[:, enrolled_len - 1 :],\n ],\n dim=1,\n )\n text_len = text_len - (enrolled_len - 2)\n assert text.shape[0] == 1\n\n x = self.nar_text_embedding(text)\n x = self.nar_text_prenet(x)\n x = self.nar_text_position(x)\n\n if self.prefix_mode == 0:\n for i, (predict_layer, embedding_layer) in enumerate(\n zip(\n self.nar_predict_layers,\n self.nar_audio_embeddings[1:],\n )\n ):\n y_pos = self.nar_audio_prenet(y_emb)\n y_pos = self.nar_audio_position(y_pos)\n xy_pos = torch.concat([x, y_pos], dim=1)\n\n xy_dec, _ = self.nar_decoder(\n (xy_pos, self.nar_stage_embeddings[i].weight)\n )\n logits = predict_layer(xy_dec[:, text_len + prefix_len :])\n\n samples = torch.argmax(logits, dim=-1)\n codes.append(samples)\n\n if i < self.num_quantizers - 2:\n y_emb[:, :prefix_len] += embedding_layer(prompts[..., i + 1])\n y_emb[:, prefix_len:] += embedding_layer(samples)\n else:\n for j in range(1, self.num_quantizers):\n y_emb[:, :prefix_len] += self.nar_audio_embeddings[j](prompts[..., j])\n\n for i, (predict_layer, embedding_layer) in enumerate(\n zip(\n self.nar_predict_layers,\n self.nar_audio_embeddings[1:],\n )\n ):\n y_pos = self.nar_audio_prenet(y_emb)\n y_pos = self.nar_audio_position(y_pos)\n xy_pos = torch.concat([x, y_pos], dim=1)\n\n xy_dec, _ = self.nar_decoder(\n (xy_pos, self.nar_stage_embeddings[i].weight)\n )\n logits = predict_layer(xy_dec[:, text_len + prefix_len :])\n\n samples = torch.argmax(logits, dim=-1)\n codes.append(samples)\n\n if i < self.num_quantizers - 2:\n 
y_emb[:, prefix_len:] += embedding_layer(samples)\n\n assert len(codes) == self.num_quantizers\n return torch.stack(codes, dim=-1)\n\n def continual(\n self,\n x: torch.Tensor,\n x_lens: torch.Tensor,\n y: torch.Tensor,\n ) -> torch.Tensor:\n \"\"\"\n Args:\n x:\n A 2-D tensor of shape (1, S).\n x_lens:\n A 1-D tensor of shape (1,). It contains the number of tokens in `x`\n before padding.\n y:\n A 3-D tensor of shape (1, T, 8).\n Returns:\n Return the predicted audio code matrix.\n \"\"\"\n assert x.ndim == 2, x.shape\n assert x_lens.ndim == 1, x_lens.shape\n assert y.ndim == 3, y.shape\n assert y.shape[0] == 1, y.shape\n\n assert torch.all(x_lens > 0)\n assert self.num_quantizers == 8\n\n text = x\n x = self.ar_text_embedding(text)\n x = self.ar_text_prenet(x)\n x = self.ar_text_position(x)\n\n text_len = x_lens.max()\n\n prefix_len = min(int(y.shape[1] * 0.5), 3 * 75)\n\n # AR Decoder\n prompts = y[:, :prefix_len]\n\n codes = [y[:, prefix_len:, 0]]\n # Non-AR Decoders\n x = self.nar_text_embedding(text)\n x = self.nar_text_prenet(x)\n x = self.nar_text_position(x)\n\n y_emb = self.nar_audio_embeddings[0](y[..., 0])\n\n if self.prefix_mode == 0:\n for i, (predict_layer, embedding_layer) in enumerate(\n zip(\n self.nar_predict_layers,\n self.nar_audio_embeddings[1:],\n )\n ):\n y_pos = self.nar_audio_position(y_emb)\n y_pos = self.nar_audio_prenet(y_pos)\n xy_pos = torch.concat([x, y_pos], dim=1)\n\n xy_dec, _ = self.nar_decoder(\n (xy_pos, self.nar_stage_embeddings[i].weight)\n )\n logits = predict_layer(xy_dec[:, text_len + prefix_len :])\n\n samples = torch.argmax(logits, dim=-1)\n codes.append(samples)\n\n if i < 6:\n y_emb[:, :prefix_len] += embedding_layer(prompts[..., i + 1])\n y_emb[:, prefix_len:] += embedding_layer(samples)\n else:\n for j in range(1, 8):\n y_emb[:, :prefix_len] += self.nar_audio_embeddings[j](prompts[..., j])\n\n for i, (predict_layer, embedding_layer) in enumerate(\n zip(\n self.nar_predict_layers,\n self.nar_audio_embeddings[1:],\n )\n ):\n y_pos = self.nar_audio_prenet(y_emb)\n y_pos = self.nar_audio_position(y_pos)\n xy_pos = torch.concat([x, y_pos], dim=1)\n\n xy_dec, _ = self.nar_decoder(\n (xy_pos, self.nar_stage_embeddings[i].weight)\n )\n logits = predict_layer(xy_dec[:, text_len + prefix_len :])\n\n samples = torch.argmax(logits, dim=-1)\n codes.append(samples)\n\n if i < 6:\n y_emb[:, prefix_len:] += embedding_layer(samples)\n\n assert len(codes) == 8\n return torch.stack(codes, dim=-1)\n\n def stage_parameters(self, stage: int = 1) -> Iterator[nn.Parameter]:\n assert stage > 0\n if stage == 1:\n for name, param in self.named_parameters():\n if name.startswith(\"ar_\"):\n yield param\n\n if stage == 2:\n for name, param in self.named_parameters():\n if name.startswith(\"nar_\"):\n yield param\n\n def stage_named_parameters(\n self, stage: int = 1\n ) -> Iterator[Tuple[str, nn.Parameter]]:\n assert stage > 0\n if stage == 1:\n for pair in self.named_parameters():\n if pair[0].startswith(\"ar_\"):\n yield pair\n\n if stage == 2:\n for pair in self.named_parameters():\n if pair[0].startswith(\"nar_\"):\n yield pair\n\n def pad_y_eos(self, y, y_mask_int, eos_id):\n targets = F.pad(y, (0, 1), value=0) + eos_id * F.pad(\n y_mask_int, (0, 1), value=1\n )\n if self.ar_audio_prepend_bos:\n return (\n F.pad(targets[:, :-1], (1, 0), value=self.audio_token_num + 1),\n targets,\n )\n\n return targets[:, :-1], targets[:, 1:]\n\n def _prepare_prompts(self, y, y_lens, codes, nar_stage, y_prompts_codes):\n # 5.1 For the NAR acoustic prompt tokens, we select a random 
segment waveform of 3 seconds\n # from the same utterance.\n # We implement this differently.\n if self.prefix_mode == 0:\n # no prefix\n prefix_len = 0\n y_emb = self.nar_audio_embeddings[0](y)\n for j in range(1, nar_stage):\n # Formula (4) (5)\n y_emb = y_emb + self.nar_audio_embeddings[j](codes[..., j])\n elif self.prefix_mode == 1:\n # prefix at begining\n int_low = (0.25 * y_lens.min()).type(torch.int64).item()\n prefix_len = torch.randint(int_low, int_low * 2, size=()).item()\n prefix_len = min(prefix_len, 225) # 24000/320 * 3s = 225 frames\n\n y_prompts = self.nar_audio_embeddings[0](y[:, :prefix_len])\n y_emb = self.nar_audio_embeddings[0](y[:, prefix_len:])\n for j in range(1, self.num_quantizers):\n y_prompts += self.nar_audio_embeddings[j](codes[:, :prefix_len, j])\n if j < nar_stage:\n y_emb += self.nar_audio_embeddings[j](codes[:, prefix_len:, j])\n y_emb = torch.concat([y_prompts, y_emb], axis=1)\n elif self.prefix_mode in [2, 4]:\n if self.prefix_mode == 2:\n # random prefix\n prefix_len = min(225, int(0.25 * y_lens.min().item()))\n\n y_prompts_codes = []\n for b in range(codes.shape[0]):\n start = self.rng.randint(0, y_lens[b].item() - prefix_len)\n y_prompts_codes.append(\n torch.clone(codes[b, start : start + prefix_len])\n )\n codes[b, start : start + prefix_len, nar_stage] = NUM_AUDIO_TOKENS\n y_prompts_codes = torch.stack(y_prompts_codes, dim=0)\n else:\n prefix_len = y_prompts_codes.shape[1]\n\n y_prompts = self.nar_audio_embeddings[0](y_prompts_codes[..., 0])\n y_emb = self.nar_audio_embeddings[0](y)\n for j in range(1, self.num_quantizers):\n y_prompts += self.nar_audio_embeddings[j](y_prompts_codes[..., j])\n if j < nar_stage:\n y_emb += self.nar_audio_embeddings[j](codes[..., j])\n y_emb = torch.concat([y_prompts, y_emb], axis=1)\n else:\n raise ValueError\n\n return y_emb, prefix_len" }, { "identifier": "TTSInference", "path": "models/tts/base/tts_inferece.py", "snippet": "class TTSInference(object):\n def __init__(self, args=None, cfg=None):\n super().__init__()\n\n start = time.monotonic_ns()\n self.args = args\n self.cfg = cfg\n self.infer_type = args.mode\n\n # get exp_dir\n if self.args.acoustics_dir is not None:\n self.exp_dir = self.args.acoustics_dir\n elif self.args.checkpoint_path is not None:\n self.exp_dir = os.path.dirname(os.path.dirname(self.args.checkpoint_path))\n\n # Init accelerator\n self.accelerator = accelerate.Accelerator()\n self.accelerator.wait_for_everyone()\n self.device = self.accelerator.device\n\n # Get logger\n with self.accelerator.main_process_first():\n self.logger = get_logger(\"inference\", log_level=args.log_level)\n\n # Log some info\n self.logger.info(\"=\" * 56)\n self.logger.info(\"||\\t\\t\" + \"New inference process started.\" + \"\\t\\t||\")\n self.logger.info(\"=\" * 56)\n self.logger.info(\"\\n\")\n\n self.acoustic_model_dir = args.acoustics_dir\n self.logger.debug(f\"Acoustic model dir: {args.acoustics_dir}\")\n\n if args.vocoder_dir is not None:\n self.vocoder_dir = args.vocoder_dir\n self.logger.debug(f\"Vocoder dir: {args.vocoder_dir}\")\n\n os.makedirs(args.output_dir, exist_ok=True)\n\n # Set random seed\n with self.accelerator.main_process_first():\n start = time.monotonic_ns()\n self._set_random_seed(self.cfg.train.random_seed)\n end = time.monotonic_ns()\n self.logger.debug(\n f\"Setting random seed done in {(end - start) / 1e6:.2f}ms\"\n )\n self.logger.debug(f\"Random seed: {self.cfg.train.random_seed}\")\n\n # Setup data loader\n if self.infer_type == \"batch\":\n with 
self.accelerator.main_process_first():\n self.logger.info(\"Building dataset...\")\n start = time.monotonic_ns()\n self.test_dataloader = self._build_test_dataloader()\n end = time.monotonic_ns()\n self.logger.info(\n f\"Building dataset done in {(end - start) / 1e6:.2f}ms\"\n )\n\n # Build model\n with self.accelerator.main_process_first():\n self.logger.info(\"Building model...\")\n start = time.monotonic_ns()\n self.model = self._build_model()\n end = time.monotonic_ns()\n self.logger.info(f\"Building model done in {(end - start) / 1e6:.3f}ms\")\n\n # Init with accelerate\n self.logger.info(\"Initializing accelerate...\")\n start = time.monotonic_ns()\n self.accelerator = accelerate.Accelerator()\n self.model = self.accelerator.prepare(self.model)\n if self.infer_type == \"batch\":\n self.test_dataloader = self.accelerator.prepare(self.test_dataloader)\n end = time.monotonic_ns()\n self.accelerator.wait_for_everyone()\n self.logger.info(f\"Initializing accelerate done in {(end - start) / 1e6:.3f}ms\")\n\n with self.accelerator.main_process_first():\n self.logger.info(\"Loading checkpoint...\")\n start = time.monotonic_ns()\n if args.acoustics_dir is not None:\n self._load_model(\n checkpoint_dir=os.path.join(args.acoustics_dir, \"checkpoint\")\n )\n elif args.checkpoint_path is not None:\n self._load_model(checkpoint_path=args.checkpoint_path)\n else:\n print(\"Either checkpoint dir or checkpoint path should be provided.\")\n\n end = time.monotonic_ns()\n self.logger.info(f\"Loading checkpoint done in {(end - start) / 1e6:.3f}ms\")\n\n self.model.eval()\n self.accelerator.wait_for_everyone()\n\n def _build_test_dataset(self):\n pass\n\n def _build_model(self):\n pass\n\n # TODO: LEGACY CODE\n def _build_test_dataloader(self):\n datasets, collate = self._build_test_dataset()\n self.test_dataset = datasets(self.args, self.cfg)\n self.test_collate = collate(self.cfg)\n self.test_batch_size = min(\n self.cfg.train.batch_size, len(self.test_dataset.metadata)\n )\n test_dataloader = DataLoader(\n self.test_dataset,\n collate_fn=self.test_collate,\n num_workers=1,\n batch_size=self.test_batch_size,\n shuffle=False,\n )\n return test_dataloader\n\n def _load_model(\n self,\n checkpoint_dir: str = None,\n checkpoint_path: str = None,\n old_mode: bool = False,\n ):\n r\"\"\"Load model from checkpoint. If checkpoint_path is None, it will\n load the latest checkpoint in checkpoint_dir. If checkpoint_path is not\n None, it will load the checkpoint specified by checkpoint_path. 
**Only use this\n method after** ``accelerator.prepare()``.\n \"\"\"\n\n if checkpoint_path is None:\n assert checkpoint_dir is not None\n # Load the latest accelerator state dicts\n ls = [\n str(i) for i in Path(checkpoint_dir).glob(\"*\") if not \"audio\" in str(i)\n ]\n ls.sort(key=lambda x: int(x.split(\"_\")[-3].split(\"-\")[-1]), reverse=True)\n checkpoint_path = ls[0]\n\n self.accelerator.load_state(str(checkpoint_path))\n return str(checkpoint_path)\n\n def inference(self):\n if self.infer_type == \"single\":\n out_dir = os.path.join(self.args.output_dir, \"single\")\n os.makedirs(out_dir, exist_ok=True)\n\n pred_audio = self.inference_for_single_utterance()\n save_path = os.path.join(out_dir, \"test_pred.wav\")\n save_audio(save_path, pred_audio, self.cfg.preprocess.sample_rate)\n\n elif self.infer_type == \"batch\":\n out_dir = os.path.join(self.args.output_dir, \"batch\")\n os.makedirs(out_dir, exist_ok=True)\n\n pred_audio_list = self.inference_for_batches()\n for it, wav in zip(self.test_dataset.metadata, pred_audio_list):\n uid = it[\"Uid\"]\n save_audio(\n os.path.join(out_dir, f\"{uid}.wav\"),\n wav.numpy(),\n self.cfg.preprocess.sample_rate,\n add_silence=True,\n turn_up=True,\n )\n tmp_file = os.path.join(out_dir, f\"{uid}.pt\")\n if os.path.exists(tmp_file):\n os.remove(tmp_file)\n print(\"Saved to: \", out_dir)\n\n @torch.inference_mode()\n def inference_for_batches(self):\n y_pred = []\n for i, batch in tqdm(enumerate(self.test_dataloader)):\n y_pred, mel_lens, _ = self._inference_each_batch(batch)\n y_ls = y_pred.chunk(self.test_batch_size)\n tgt_ls = mel_lens.chunk(self.test_batch_size)\n j = 0\n for it, l in zip(y_ls, tgt_ls):\n l = l.item()\n it = it.squeeze(0)[:l].detach().cpu()\n\n uid = self.test_dataset.metadata[i * self.test_batch_size + j][\"Uid\"]\n torch.save(it, os.path.join(self.args.output_dir, f\"{uid}.pt\"))\n j += 1\n\n vocoder_cfg, vocoder_ckpt = self._parse_vocoder(self.args.vocoder_dir)\n res = synthesis(\n cfg=vocoder_cfg,\n vocoder_weight_file=vocoder_ckpt,\n n_samples=None,\n pred=[\n torch.load(\n os.path.join(self.args.output_dir, \"{}.pt\".format(item[\"Uid\"]))\n ).numpy()\n for item in self.test_dataset.metadata\n ],\n )\n for it, wav in zip(self.test_dataset.metadata, res):\n uid = it[\"Uid\"]\n save_audio(\n os.path.join(self.args.output_dir, f\"{uid}.wav\"),\n wav.numpy(),\n 22050,\n add_silence=True,\n turn_up=True,\n )\n\n @abstractmethod\n @torch.inference_mode()\n def _inference_each_batch(self, batch_data):\n pass\n\n def inference_for_single_utterance(self, text):\n pass\n\n def synthesis_by_vocoder(self, pred):\n audios_pred = synthesis(\n self.vocoder_cfg,\n self.checkpoint_dir_vocoder,\n len(pred),\n pred,\n )\n\n return audios_pred\n\n @staticmethod\n def _parse_vocoder(vocoder_dir):\n r\"\"\"Parse vocoder config\"\"\"\n vocoder_dir = os.path.abspath(vocoder_dir)\n ckpt_list = [ckpt for ckpt in Path(vocoder_dir).glob(\"*.pt\")]\n ckpt_list.sort(key=lambda x: int(x.stem), reverse=True)\n ckpt_path = str(ckpt_list[0])\n vocoder_cfg = load_config(\n os.path.join(vocoder_dir, \"args.json\"), lowercase=True\n )\n return vocoder_cfg, ckpt_path\n\n def _set_random_seed(self, seed):\n \"\"\"Set random seed for all possible random modules.\"\"\"\n random.seed(seed)\n np.random.seed(seed)\n torch.random.manual_seed(seed)" }, { "identifier": "VALLETestDataset", "path": "models/tts/valle/valle_dataset.py", "snippet": "class VALLETestDataset(TTSTestDataset):\n def __init__(self, args, cfg):\n super().__init__(args, cfg)\n\n # prepare data\n 
assert cfg.preprocess.use_acoustic_token == True\n if cfg.preprocess.use_acoustic_token:\n self.utt2acousticToken = {}\n for utt_info in self.metadata:\n dataset = utt_info[\"Dataset\"]\n uid = utt_info[\"Uid\"]\n utt = \"{}_{}\".format(dataset, uid)\n\n # extract acoustic token\n audio_file = utt_info[\"Audio_pormpt_path\"]\n encoded_frames = tokenize_audio(self.audio_tokenizer, audio_file)\n audio_prompt_token = (\n encoded_frames[0][0].transpose(2, 1).squeeze(0).cpu().numpy()\n )\n self.utt2acousticToken[utt] = audio_prompt_token\n\n def __getitem__(self, index):\n utt_info = self.metadata[index]\n\n dataset = utt_info[\"Dataset\"]\n uid = utt_info[\"Uid\"]\n utt = \"{}_{}\".format(dataset, uid)\n\n single_feature = dict()\n\n # acoustic token\n if self.cfg.preprocess.use_acoustic_token:\n acoustic_token = self.utt2acousticToken[utt]\n if \"target_len\" not in single_feature.keys():\n single_feature[\"target_len\"] = acoustic_token.shape[0]\n single_feature[\"acoustic_token\"] = acoustic_token # [T, 8]\n\n # phone sequence todo\n if self.cfg.preprocess.use_phone:\n single_feature[\"phone_seq\"] = np.array(self.utt2seq[utt])\n single_feature[\"phone_len\"] = len(self.utt2seq[utt])\n single_feature[\"pmt_phone_seq\"] = np.array(self.utt2pmtseq[utt])\n single_feature[\"pmt_phone_len\"] = len(self.utt2pmtseq[utt])\n\n return single_feature\n\n def get_metadata(self):\n with open(self.metafile_path, \"r\", encoding=\"utf-8\") as f:\n metadata = json.load(f)\n return metadata\n\n def __len__(self):\n return len(self.metadata)" }, { "identifier": "VALLETestCollator", "path": "models/tts/valle/valle_dataset.py", "snippet": "class VALLETestCollator(TTSTestCollator):\n def __init__(self, cfg):\n self.cfg = cfg\n\n def __call__(self, batch):\n packed_batch_features = dict()\n\n for key in batch[0].keys():\n if key == \"target_len\":\n packed_batch_features[\"target_len\"] = torch.LongTensor(\n [b[\"target_len\"] for b in batch]\n )\n masks = [\n torch.ones((b[\"target_len\"], 1), dtype=torch.long) for b in batch\n ]\n packed_batch_features[\"mask\"] = pad_sequence(\n masks, batch_first=True, padding_value=0\n )\n elif key == \"phone_len\":\n packed_batch_features[\"phone_len\"] = torch.LongTensor(\n [b[\"phone_len\"] for b in batch]\n )\n masks = [\n torch.ones((b[\"phone_len\"], 1), dtype=torch.long) for b in batch\n ]\n packed_batch_features[\"phn_mask\"] = pad_sequence(\n masks, batch_first=True, padding_value=0\n )\n elif key == \"pmt_phone_len\":\n packed_batch_features[\"pmt_phone_len\"] = torch.LongTensor(\n [b[\"pmt_phone_len\"] for b in batch]\n )\n masks = [\n torch.ones((b[\"pmt_phone_len\"], 1), dtype=torch.long) for b in batch\n ]\n packed_batch_features[\"pmt_phone_len_mask\"] = pad_sequence(\n masks, batch_first=True, padding_value=0\n )\n elif key == \"audio_len\":\n packed_batch_features[\"audio_len\"] = torch.LongTensor(\n [b[\"audio_len\"] for b in batch]\n )\n masks = [\n torch.ones((b[\"audio_len\"], 1), dtype=torch.long) for b in batch\n ]\n else:\n values = [torch.from_numpy(b[key]) for b in batch]\n packed_batch_features[key] = pad_sequence(\n values, batch_first=True, padding_value=0\n )\n\n return packed_batch_features" }, { "identifier": "phoneExtractor", "path": "processors/phone_extractor.py", "snippet": "class phoneExtractor:\n def __init__(self, cfg, dataset_name=None, phone_symbol_file=None):\n \"\"\"\n Args:\n cfg: config\n dataset_name: name of dataset\n \"\"\"\n self.cfg = cfg\n\n # phone symbols dict\n self.phone_symbols = set()\n\n # phone symbols dict file\n 
if phone_symbol_file is not None:\n self.phone_symbols_file = phone_symbol_file\n elif dataset_name is not None:\n self.dataset_name = dataset_name\n self.phone_symbols_file = os.path.join(\n cfg.preprocess.processed_dir, dataset_name, cfg.preprocess.symbols_dict\n )\n\n # initialize g2p module\n if cfg.preprocess.phone_extractor in [\n \"espeak\",\n \"pypinyin\",\n \"pypinyin_initials_finals\",\n ]:\n self.g2p_module = G2PModule(backend=cfg.preprocess.phone_extractor)\n elif cfg.preprocess.phone_extractor == \"lexicon\":\n assert cfg.preprocess.lexicon_path != \"\"\n self.g2p_module = LexiconModule(cfg.preprocess.lexicon_path)\n else:\n print(\"No suppert to\", cfg.preprocess.phone_extractor)\n raise\n\n def extract_phone(self, text):\n \"\"\"\n Extract phone from text\n Args:\n\n text: text of utterance\n\n Returns:\n phone_symbols: set of phone symbols\n phone_seq: list of phone sequence of each utterance\n \"\"\"\n\n if self.cfg.preprocess.phone_extractor in [\n \"espeak\",\n \"pypinyin\",\n \"pypinyin_initials_finals\",\n ]:\n text = text.replace(\"”\", '\"').replace(\"“\", '\"')\n phone = self.g2p_module.g2p_conversion(text=text)\n self.phone_symbols.update(phone)\n phone_seq = [phn for phn in phone]\n\n elif self.cfg.preprocess.phone_extractor == \"lexicon\":\n phone_seq = self.g2p_module.g2p_conversion(text)\n phone = phone_seq\n if not isinstance(phone_seq, list):\n phone_seq = phone_seq.split()\n\n return phone_seq\n\n def save_dataset_phone_symbols_to_table(self):\n # load and merge saved phone symbols\n if os.path.exists(self.phone_symbols_file):\n phone_symbol_dict_saved = SymbolTable.from_file(\n self.phone_symbols_file\n )._sym2id.keys()\n self.phone_symbols.update(set(phone_symbol_dict_saved))\n\n # save phone symbols\n phone_symbol_dict = SymbolTable()\n for s in sorted(list(self.phone_symbols)):\n phone_symbol_dict.add(s)\n phone_symbol_dict.to_file(self.phone_symbols_file)" }, { "identifier": "phoneIDCollation", "path": "text/text_token_collation.py", "snippet": "class phoneIDCollation:\n def __init__(self, cfg, dataset=None, symbols_dict_file=None) -> None:\n if cfg.preprocess.phone_extractor != \"lexicon\":\n ### get text token collator\n if symbols_dict_file is None:\n assert dataset is not None\n symbols_dict_file = os.path.join(\n cfg.preprocess.processed_dir, dataset, cfg.preprocess.symbols_dict\n )\n self.text_token_colloator, token2idx = get_text_token_collater(\n symbols_dict_file\n )\n # # unique_tokens = SymbolTable.from_file(symbols_dict_path)\n # # text_tokenizer = TextToken(unique_tokens.symbols, add_bos=True, add_eos=True)\n\n # # update phone symbols dict file with pad_symbol or optional tokens (add_bos and add_eos) in TextTokenCollator\n # phone_symbol_dict = SymbolTable()\n # for s in sorted(list(set(token2idx.keys()))):\n # phone_symbol_dict.add(s)\n # phone_symbol_dict.to_file(symbols_dict_file)\n\n def get_phone_id_sequence(self, cfg, phones_seq):\n if cfg.preprocess.phone_extractor == \"lexicon\":\n phones_seq = \" \".join(phones_seq)\n sequence = text_to_sequence(phones_seq, cfg.preprocess.text_cleaners)\n else:\n sequence, seq_len = self.text_token_colloator(phones_seq)\n return sequence" } ]
import os
import numpy as np
import torch
import torchaudio
import argparse

from text.g2p_module import G2PModule
from utils.tokenizer import AudioTokenizer, tokenize_audio
from models.tts.valle.valle import VALLE
from models.tts.base.tts_inferece import TTSInference
from models.tts.valle.valle_dataset import VALLETestDataset, VALLETestCollator
from processors.phone_extractor import phoneExtractor
from text.text_token_collation import phoneIDCollation
13,115
# Copyright (c) 2023 Amphion.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.


class VALLEInference(TTSInference):
    def __init__(self, args=None, cfg=None):
        TTSInference.__init__(self, args, cfg)

        self.g2p_module = G2PModule(backend=self.cfg.preprocess.phone_extractor)
        text_token_path = os.path.join(
            cfg.preprocess.processed_dir, cfg.dataset[0], cfg.preprocess.symbols_dict
        )

        self.audio_tokenizer = AudioTokenizer()

    def _build_model(self):
        model = VALLE(self.cfg.model)
        return model

    def _build_test_dataset(self):
        return VALLETestDataset, VALLETestCollator

    def inference_one_clip(self, text, text_prompt, audio_file, save_name="pred"):
        # get phone symbol file
        phone_symbol_file = None
        if self.cfg.preprocess.phone_extractor != "lexicon":
            phone_symbol_file = os.path.join(
                self.exp_dir, self.cfg.preprocess.symbols_dict
            )
            assert os.path.exists(phone_symbol_file)

        # convert text to phone sequence
# Copyright (c) 2023 Amphion.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.


class VALLEInference(TTSInference):
    def __init__(self, args=None, cfg=None):
        TTSInference.__init__(self, args, cfg)

        self.g2p_module = G2PModule(backend=self.cfg.preprocess.phone_extractor)
        text_token_path = os.path.join(
            cfg.preprocess.processed_dir, cfg.dataset[0], cfg.preprocess.symbols_dict
        )

        self.audio_tokenizer = AudioTokenizer()

    def _build_model(self):
        model = VALLE(self.cfg.model)
        return model

    def _build_test_dataset(self):
        return VALLETestDataset, VALLETestCollator

    def inference_one_clip(self, text, text_prompt, audio_file, save_name="pred"):
        # get phone symbol file
        phone_symbol_file = None
        if self.cfg.preprocess.phone_extractor != "lexicon":
            phone_symbol_file = os.path.join(
                self.exp_dir, self.cfg.preprocess.symbols_dict
            )
            assert os.path.exists(phone_symbol_file)

        # convert text to phone sequence
phone_extractor = phoneExtractor(self.cfg)
7
2023-11-15 09:19:27+00:00
16k
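Note: the cropped_code above stops right before the gold next_line, phone_extractor = phoneExtractor(self.cfg). A minimal sketch of how that text-to-phone step could continue inside inference_one_clip, using only the phoneExtractor and phoneIDCollation APIs shown in the context snippets; the variable names phone_seq and phone_id_seq are illustrative assumptions, not code from the Amphion repository.

        # continuation sketch (assumed, not repository code)
        phone_extractor = phoneExtractor(self.cfg)
        phone_seq = phone_extractor.extract_phone(text)  # text -> list of phone symbols

        # map phone symbols to integer ids via the symbols dict resolved above
        phone_id_collator = phoneIDCollation(self.cfg, symbols_dict_file=phone_symbol_file)
        phone_id_seq = phone_id_collator.get_phone_id_sequence(self.cfg, phone_seq)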
BobaZooba/xllm
tests/conftest.py
[ { "identifier": "enums", "path": "src/xllm/enums.py", "snippet": "class General:\nclass Transformers:\nclass Registry:\nclass Datasets:\nclass Collators:\nclass Trainers:\nclass Experiments:\nclass EnvironmentVariables:\nclass LogLevel:" }, { "identifier": "LMCollator", "path": "src/xllm/collators/lm.py", "snippet": "class LMCollator(BaseCollator):\n \"\"\"\n `LMCollator` is a data collator class specifically designed to prepare batches of data for language modeling tasks.\n Extending the `BaseCollator`, it adapts the general data collation procedure to suit the sequential nature of\n language models, where each token in an input sequence is used to predict the next token.\n\n The `LMCollator` provides a streamlined approach to handle the conversion of raw text data into the tokenized and\n tensor-formatted inputs required by language models during training. Its primary functionality is implemented in\n the `parse_batch` method, which oversees this conversion process.\n\n This collator is needed for simple language modeling. It compiles the lists of texts from each example into a\n single text by concatenating them using a separator.\n\n Key functionalities provided by `LMCollator`:\n\n - `parse_batch`: A method that processes a batch of `RawSample` objects, creating the tensors needed for training.\n It generates input token IDs (`input_ids`), an attention mask to differentiate between real tokens and padding\n (`attention_mask`), and the labels for training the language model (`labels`).\n\n Attributes (inherited from `BaseCollator`):\n\n - `tokenizer`: The tokenizer used to convert raw text into token IDs.\n - `max_length`: The maximum length allowed for tokenized sequences. Longer sequences will be truncated\n to this length.\n - `separator`: A string used to join text pieces within each raw sample, if necessary.\n\n The `LMCollator` is particularly useful when training Transformer-based language models like GPT on\n next-token prediction tasks. 
Since it already applies common preprocessing steps such as tokenization, padding,\n truncation, and sequence shifting, it makes setting up training pipelines simpler and more efficient.\n\n Usage of the `LMCollator` facilitates the generation of data in the precise format required by language models,\n enabling practitioners to focus on model architecture and performance rather than boilerplate data preparation code.\n \"\"\"\n\n def parse_batch(self, raw_batch: List[RawSample]) -> Batch:\n \"\"\"\n Processes a batch of raw text samples and converts them into a suitable format for language model training,\n specifically for tasks involving language modeling (LM) such as next-token prediction.\n\n Args:\n raw_batch (`List[RawSample]`):\n A list of dictionaries, where each `RawSample` dictionary contains a key-value pair.\n The key is defined by `enums.General.text_parts` and the value is expected to be either a string,\n a numeric value, or a list of these types representing segments of text to include in the batch.\n\n Returns:\n `Batch`: A dictionary containing the following keys, each associated with its corresponding tensor:\n - `enums.Transformers.input_ids`: The input tokens, with the last token removed from each sequence,\n as input to the model.\n - `enums.Transformers.attention_mask`: The attention mask, indicating valid tokens (as 1) and padding\n tokens (as 0) for the model, also with the last token removed from each sequence.\n - `enums.Transformers.labels`: The labels used for training the language model, obtained by shifting\n the input IDs by one token to the right to predict the next token.\n\n The `parse_batch` method operates in the following steps:\n\n - Joins the text segments for each sample in the batch using the specified separator.\n - Tokenizes the combined texts using the provided tokenizer, with padding to the longest sequence in the batch\n and truncation to the specified maximum length.\n - Constructs the input IDs and attention mask for the model input, ensuring to remove the last token from each,\n as it has no subsequent token to predict.\n - Prepares the labels by shifting the input sequence by one token position, facilitating the LM's task of\n predicting the next token in the sequence.\n\n This collator is tailored for language modeling, where the inputs and labels are often closely related\n sequences, with labels simply being the input sequence offset by one token. 
It integrates smoothly with\n training routines that utilize PyTorch's DataLoader and other training utilities.\n \"\"\"\n\n texts = list()\n\n for sample in raw_batch:\n item = sample[enums.General.text_parts]\n if isinstance(item, (str, int, float)):\n texts.append(self.separator.join(str(item)))\n else:\n texts.append(self.separator.join(str(i) for i in item))\n\n tokenized = self.tokenizer(\n texts,\n return_tensors=\"pt\",\n padding=True,\n truncation=True,\n max_length=self.max_length,\n )\n\n batch = {\n enums.Transformers.input_ids: tokenized.input_ids[:, :-1],\n enums.Transformers.attention_mask: tokenized.attention_mask[:, :-1],\n enums.Transformers.labels: tokenized.input_ids[:, 1:],\n }\n\n return batch" }, { "identifier": "Config", "path": "src/xllm/core/config.py", "snippet": "class Config:\n \"\"\"\n The `Config` class serves as a comprehensive configuration schema for managing various parameters required during\n the setup and execution of experiments relating to language models, such as training, quantization, and\n optimization.\n\n Write more here:\n - https://github.com/BobaZooba/xllm/blob/main/DOCS.md#config\n - https://github.com/BobaZooba/xllm/blob/main/DOCS.md#detailed-config-explanation\n\n This dataclass is used to encapsulate and standardize the configuration for a diverse range of tasks including\n dataset preparation, tokenizer and model initialization, training, evaluation, and interactions with remote services\n like the Hugging Face Model Hub.\n\n Attributes in this class cover aspects like model name and path, tokenizer settings, dataset paths, training\n strategies, quantization methods, hardware acceleration, logging, output directories, and more. The class provides\n properties with custom logic to resolve specific configurations and validation checks to ensure the environment is\n appropriately set up before proceeding with the workflow.\n\n Customization and flexibility are core to this class, as it provides reasonable defaults while also allowing for\n detailed and scalable configurations catered to advanced tasks such as leveraging LoRA, FSDP, deepspeed stage\n setups, and applying incremental quantization techniques like GPTQ and bits-and-bytes.\n\n Methods within the class include:\n - `check`: Performs checks across various attributes for compatibility and correctness.\n - Property getters such as `correct_tokenizer_name_or_path`, `lora_target_modules`, `dtype`, `deepspeed`, `fsdp`,\n and `lora_model_name_or_path_for_fusing` to fetch calculated or defaulted values based on attribute settings.\n\n Subclassing can be done to extend or modify the functionality of the `Config` class to address specific experimental\n scenarios or customized workflows. 
It is the central piece for orchestrating experimental setups and is intimately\n integrated with the rest of the codebase that operates on top of these configurations.\n\n Attributes:\n\n General Settings:\n - `experiment_key`: An enumeration key to specify the experiment type.\n - `save_safetensors`: A boolean value to indicate whether to use safe serialization for tensors.\n - `max_shard_size`: The maximum shard size when pushing the model to the HuggingFace Hub.\n - `local_rank`: Local rank for distributed training, used for logging and saving.\n - `use_gradient_checkpointing`: If set to `True`, enables gradient checkpointing to reduce memory usage at\n the cost of a slower backward pass.\n - `trainer_key`: An enumeration key to select the trainer using the trainers_registry.\n - `force_fp32`: Forces loading the model in fp32 precision, if set to `True`.\n - `force_fp16`: Forces loading the model in fp16 precision, if set to `True`.\n - `from_gptq`: Indicates if a GPTQ quantized model is being loaded.\n - `huggingface_hub_token`: Token for uploading models to HuggingFace Hub.\n - `deepspeed_stage`: Predefined DeepSpeed stage for optimization.\n - `deepspeed_config_path`: Path to the DeepSpeed config file.\n - `fsdp_strategy`: The strategy to be used for Fully Sharded Data Parallelism (FSDP).\n - `fsdp_offload`: If set to `True`, offloads weights to CPU when using FSDP to save memory.\n - `seed`: Seed for random number generators to ensure reproducibility.\n - `stabilize`: Converts some model weights to fp32 and others to bf16 for stabilization.\n - `path_to_env_file`: Custom path to the .env file for reading environment variables.\n\n Data Preparation:\n - `prepare_dataset`: Flags whether to prepare the dataset during the \"prepare\" stage.\n\n LoRA Fusing:\n - `lora_hub_model_id`: Name of the LoRA model on the hub for fusion.\n - `lora_model_local_path`: Local path to LoRA model to be fused.\n - `fused_model_local_path`: Local path to save the fused model.\n - `fuse_after_training`: If `True`, will fuse the model post-training.\n\n GPTQ Quantization:\n - `quantization_dataset_id`: Dataset ID for GPTQ quantization.\n - `quantization_max_samples`: Maximum number of samples to use during GPTQ quantization.\n - `quantized_model_path`: Path to save the GPTQ quantized model.\n - `quantized_hub_model_id`: Name of the model at the hub post-GPTQ quantization.\n - `quantized_hub_private_repo`: If set to `True`, creates a private repository for the quantized model.\n\n Dataset Related:\n - `dataset_key`: Key to select the dataset from the datasets_registry.\n - `train_local_path_to_data`: Local path to the training data file.\n - `eval_local_path_to_data`: Local path to the evaluation data file.\n - `shuffle`: If `True`, shuffles the training data.\n - `max_eval_samples`: Maximum number of examples to use for evaluation.\n - `add_eval_to_train_if_no_path`: If `True`, adds evaluation data to training if there's no separate eval path.\n\n Tokenizer Settings:\n - `tokenizer_name_or_path`: Name or path to the tokenizer.\n - `tokenizer_use_fast`: If `True`, uses the fast version of the tokenizer.\n - `tokenizer_padding_side`: Sets padding side to 'right' or 'left'.\n\n Data Collator Settings:\n - `collator_key`: Key to select the collator from the collators_registry.\n - `max_length`: Maximum sequence length for the model.\n\n Model Configuration:\n - `model_name_or_path`: Name or path to the model to be used.\n - `push_to_hub_bos_add_bos_token`: Adds BOS token when uploading tokenization 
configuration to the hub.\n - `use_flash_attention_2`: Flags the use of flash attention 2.\n - `trust_remote_code`: If `True`, trusts remote code from the HuggingFace Hub.\n - `device_map`: Device map for placing model layers on specific devices.\n - `prepare_model_for_kbit_training`: If `True`, prepares the model for k-bit training.\n\n BitsAndBytes Integration:\n - `load_in_8bit`: Load the model in 8-bit mode using bitsandbytes.\n - `load_in_4bit`: Load the model in 4-bit mode using bitsandbytes.\n - `llm_int8_threshold`: Threshold for detecting outliers in the model weights.\n - `llm_int8_has_fp16_weight`: If `True`, the model will have fp16 weights.\n - `bnb_4bit_use_double_quant`: If `True`, a second quantization step is used for 4-bit weights.\n - `bnb_4bit_quant_type`: Specifies the quantization type used for 4-bit weights.\n - `bnb_quantize_after_model_init`: Determines when the quantization should occur.\n\n GPTQ Specific Parameters:\n - `gptq_bits`: Number of bits for GPTQ quantization.\n - `gptq_group_size`: Group size for GPTQ quantization.\n - `gptq_disable_exllama`: If `True`, disables ExLlama kernels during GPTQ quantization.\n\n LoRA Specific Parameters:\n - `apply_lora`: If `True`, applies LoRA to the model.\n - `lora_rank`: LoRA rank to define the size of the LoRA matrices.\n - `lora_alpha`: Multiplication factor for the resulting LoRA matrix.\n - `lora_dropout`: Dropout rate for LoRA.\n - `raw_lora_target_modules`: Comma-separated string of module names to apply LoRA, or 'all' to apply broadly.\n\n Training Arguments:\n - `output_dir`: Path to save training outputs.\n - `per_device_train_batch_size`: Batch size per device for training.\n - `do_eval`: If `True`, performs evaluation.\n - `per_device_eval_batch_size`: Batch size per device for evaluation.\n - `gradient_accumulation_steps`: Number of steps to accumulate gradients for larger effective batch size.\n - `eval_accumulation_steps`: Number of steps to accumulate gradients during evaluation.\n - `eval_delay`: Delay before the first evaluation.\n - `eval_steps`: Number of update steps between evaluations.\n - `warmup_steps`: Number of steps for learning rate warmup.\n - `max_steps`: Maximum number of training steps.\n - `num_train_epochs`: Number of epochs for training.\n - `learning_rate`: Learning rate for the optimizer.\n - `max_grad_norm`: Gradient clipping threshold.\n - `weight_decay`: Coefficient for weight decay regularization.\n - `label_smoothing_factor`: Label smoothing factor.\n - `logging_steps`: Number of steps between logging intermediate results.\n - `save_steps`: Number of training steps between checkpoints and model upload.\n - `save_total_limit`: Maximum number of checkpoints to keep.\n - `optim`: Optimizer name, overwritten by DeepSpeed if used.\n - `push_to_hub`: If `True`, model checkpoints are uploaded to HuggingFace Hub.\n - `hub_model_id`: Name of the model on the HuggingFace Hub.\n - `hub_private_repo`: If `True`, creates a private repository on the HuggingFace Hub.\n\n Weights & Biases Integration:\n - `report_to_wandb`: If `True`, logs metrics to Weights & Biases.\n - `wandb_api_key`: API key for Weights & Biases.\n - `wandb_project`: Project name on Weights & Biases.\n - `wandb_entity`: Entity name (user or organization) on Weights & Biases.\n\n Example of creating a `Config` object:\n ```python\n config = Config(\n model_name_or_path='gpt2',\n dataset_key='my_dataset',\n gradient_accumulation_steps=8,\n max_length=512,\n deepspeed_stage=\"3\",\n )\n ```\n\n Note:\n - Throughout the 
codebase, `Config` instances are passed around to provide a unified source of configurations\n for various components.\n - It is crucial to ensure all required settings are properly set in a `Config` object before it is utilized,\n particularly when overriding defaults or specifying custom configurations.\n \"\"\"\n\n # general\n experiment_key: str = field(\n default=enums.Experiments.base,\n metadata={\"help\": \"Experiment class key\"},\n )\n save_safetensors: bool = field(\n default=True,\n metadata={\n \"help\": \"Use safe serialization (safe tensors) or not\",\n },\n )\n max_shard_size: str = field(\n default=\"10GB\", metadata={\"help\": \"max_shard_size for the model pushing to the HuggingFace Hub\"}\n )\n local_rank: int = field(\n default=0,\n metadata={\n \"help\": \"Local rank for logging and saving. Works only in distributed training\",\n },\n )\n use_gradient_checkpointing: bool = field(\n default=False,\n metadata={\n \"help\": \"If True, use gradient checkpointing to save memory at the expense of slower backward pass\",\n },\n )\n trainer_key: str = field(\n default=enums.Trainers.lm,\n metadata={\n \"help\": \"Key of the trainer for loading from trainers_registry\",\n },\n )\n force_fp32: bool = field(\n default=False,\n metadata={\n \"help\": \"Force using fp32 when model loading\",\n },\n )\n force_fp16: bool = field(\n default=False,\n metadata={\n \"help\": \"Force using fp16 when model loading\",\n },\n )\n from_gptq: bool = field(\n default=False,\n metadata={\n \"help\": \"If you loadining GPTQ quantized model\",\n },\n )\n huggingface_hub_token: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"HuggingFace Hub token. You can also set this key using .env file\",\n },\n )\n single_gpu: Optional[bool] = field(\n default=None,\n metadata={\n \"help\": \"Indicates that you are learning on the same GPU. It is necessary to use DeepSpeed on a single GPU\",\n },\n )\n master_port: int = field(\n default=9994,\n metadata={\n \"help\": \"Master port for running DeepSpeed on a single GPU. Modify if RuntimeError: Address already in use\",\n },\n )\n deepspeed_stage: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Predifined DeepSpeed stage\",\n },\n )\n deepspeed_config_path: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"Path to DeepSpeed config\",\n },\n )\n fsdp_strategy: str = field(\n default=\"\",\n metadata={\n \"help\": \"FSDP strategy\",\n },\n )\n fsdp_offload: bool = field(\n default=True,\n metadata={\n \"help\": \"Offload weights when using FSDP\",\n },\n )\n seed: int = field(\n default=42,\n metadata={\n \"help\": \"Seed value for random operations\",\n },\n )\n stabilize: bool = field(\n default=False,\n metadata={\n \"help\": \"Stabilize the model. Convert some weights (e.g. LoRA) to bf16\",\n },\n )\n norm_fp32: bool = field(\n default=False,\n metadata={\n \"help\": \"Convert norm to fp32\",\n },\n )\n path_to_env_file: Optional[str] = field(\n default=\"./.env\",\n metadata={\"help\": \"Custom path to .env file\"},\n )\n\n # prepare\n prepare_dataset: bool = field(\n default=True,\n metadata={\n \"help\": 'Prepare the dataset. Works only at \"prepare\" stage',\n },\n )\n\n # fuse\n lora_hub_model_id: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Fusing LoRA. The name of the LoRA model at the hub for fusing. Example: BobaZooba/Shurale\",\n },\n )\n lora_model_local_path: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Fusing LoRA. 
Local path to the LoRA model\",\n },\n )\n fused_model_local_path: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Local path to fused model. Useful if you want to quantize model after fusing on the same machine\",\n },\n )\n fuse_after_training: bool = field(\n default=False,\n metadata={\n \"help\": \"Fuse or not model after training\",\n },\n )\n\n # gptq quantization\n quantization_dataset_id: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Dataset id for GPTQ quantization. You can install either the idi dataset, or use any dataset\",\n },\n )\n quantization_max_samples: int = field(\n default=1024,\n metadata={\n \"help\": \"Max samples for GPTQ quantization if you use custom dataset\",\n },\n )\n quantized_model_path: str = field(\n default=\"./quantized_model/\",\n metadata={\n \"help\": \"Path to GPTQ quantized model\",\n },\n )\n quantized_hub_model_id: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"The name of the model at the hub for GPTQ quantization. Example: BobaZooba/Shurale-GPTQ\",\n },\n )\n quantized_hub_private_repo: bool = field(\n default=True,\n metadata={\n \"help\": \"Private repository for GPTQ quantization model or not\",\n },\n )\n\n # dataset\n dataset_key: str = field(\n default=enums.Datasets.soda,\n metadata={\n \"help\": \"Key of the dataset for loading from datasets_registry\",\n },\n )\n train_local_path_to_data: str = field(\n default=\"./train.jsonl\",\n metadata={\n \"help\": \"The path to the local training data file\",\n },\n )\n eval_local_path_to_data: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"The path to the local eval data file\",\n },\n )\n shuffle: bool = field(\n default=True,\n metadata={\n \"help\": \"Shuffle training data\",\n },\n )\n max_eval_samples: int = field(\n default=1_000,\n metadata={\n \"help\": \"Maximum number of examples for evaluation\",\n },\n )\n add_eval_to_train_if_no_path: bool = field(\n default=False,\n metadata={\n \"help\": \"Add evaluation data to training data if their number is greater than max_eval_samples\",\n },\n )\n\n # tokenizer\n tokenizer_name_or_path: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Tokenizer name or path. If the value is not set, \"\n \"then it will be taken from the model_name_or_path\",\n },\n )\n tokenizer_use_fast: Optional[bool] = field(\n default=None,\n metadata={\n \"help\": \"Use fast flag for the tokenizer\",\n },\n )\n tokenizer_padding_side: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Padding side of the collator: None, right or left\",\n },\n )\n\n # collator\n collator_key: str = field(\n default=enums.Collators.lm,\n metadata={\n \"help\": \"Key of the collator for loading from collators_registry\",\n },\n )\n max_length: int = field(\n default=2048,\n metadata={\n \"help\": \"Max sequence length of the model\",\n },\n )\n\n # model\n model_name_or_path: str = field(\n default=\"mistralai/Mistral-7B-v0.1\",\n metadata={\n \"help\": \"Model name or path. It could be from HuggingFace or locally\",\n },\n )\n push_to_hub_bos_add_bos_token: bool = field(\n default=False,\n metadata={\n \"help\": \"Upload to the hub tokenization config with add_bos_token equals to True. Might be helpful for TGI\"\n },\n )\n use_flash_attention_2: bool = field(\n default=False,\n metadata={\n \"help\": \"Use or not flash attention 2. 
Requires 1) CUDA >= 11.6; 2) install flash-attn 3) compatible model\",\n },\n )\n trust_remote_code: bool = field(\n default=False,\n metadata={\n \"help\": \"Trust remote code from HuggingFace\",\n },\n )\n device_map: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Device map for loading the model\",\n },\n )\n prepare_model_for_kbit_training: Optional[bool] = field(\n default=None,\n metadata={\n \"help\": \"Prepare or not for kbit training\",\n },\n )\n offload_folder: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Offloading folder. Helps for fusing in colab\",\n },\n )\n\n # bitsandbytes\n load_in_8bit: bool = field(\n default=False,\n metadata={\n \"help\": \"Load the model in 8 bit using bitsandbytes\",\n },\n )\n load_in_4bit: bool = field(\n default=False,\n metadata={\n \"help\": \"Load the model in 4 bit using bitsandbytes\",\n },\n )\n llm_int8_threshold: float = field(\n default=6.0,\n metadata={\n \"help\": \"Threshold for outlier detection\",\n },\n )\n llm_int8_has_fp16_weight: bool = field(\n default=True,\n metadata={\n \"help\": \"LLM has weights in fp16\",\n },\n )\n bnb_4bit_use_double_quant: bool = field(\n default=True,\n metadata={\n \"help\": \"Double quantization. \"\n \"This will enable a second quantization after the first \"\n \"one to save an additional 0.4 bits per parameter\",\n },\n )\n bnb_4bit_quant_type: str = field(\n default=\"nf4\",\n metadata={\n \"help\": \"Quantization type for 4 bit\",\n },\n )\n bnb_quantize_after_model_init: bool = field(\n default=False, metadata={\"help\": \"If False, quantization will be at model init\"}\n )\n\n # gptq\n gptq_bits: int = field(\n default=4,\n metadata={\n \"help\": \"Bits for GPTQ quantization\",\n },\n )\n gptq_group_size: int = field(\n default=128,\n metadata={\n \"help\": \"Group size for GPTQ quantization\",\n },\n )\n gptq_disable_exllama: bool = field(\n default=True,\n metadata={\n \"help\": \"Disable ExLlama kernels for GPTQ quantization\",\n },\n )\n\n # lora\n apply_lora: bool = field(\n default=False,\n metadata={\n \"help\": \"Apply LoRA to the model or not\",\n },\n )\n lora_rank: int = field(\n default=8,\n metadata={\n \"help\": \"LoRA rank value. LoRA matrices W_A x R and R x W_B, where R is LoRA rank\",\n },\n )\n lora_alpha: int = field(\n default=32,\n metadata={\n \"help\": \"LoRA alpha value. The resulting LoRA matrix will be multiplied by this value\",\n },\n )\n lora_dropout: float = field(\n default=0.1,\n metadata={\n \"help\": \"LoRA dropout value\",\n },\n )\n raw_lora_target_modules: str = field(\n default=\"all\",\n metadata={\n \"help\": 'Names of modules to apply LoRA. A comma-separated string, for example: \"k,q,v\". '\n 'When setting the value \"all\", LoRA will be applied to all linear layers, except for the '\n \"input embeddings and the lm_head.\",\n },\n )\n\n # training arguments\n output_dir: str = field(\n default=\"./outputs/\",\n metadata={\n \"help\": \"The path to the directory where the artifacts will be saved\",\n },\n )\n per_device_train_batch_size: int = field(\n default=2,\n metadata={\n \"help\": \"Batch size on each GPU\",\n },\n )\n do_eval: bool = field(\n default=False,\n metadata={\n \"help\": \"Run eval or not\",\n },\n )\n per_device_eval_batch_size: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"Batch size on each GPU for evaluation. 
\"\n \"If None per_device_eval_batch_size equals to per_device_train_batch_size\",\n },\n )\n gradient_accumulation_steps: int = field(\n default=1,\n metadata={\n \"help\": \"Number of steps to accumulate gradients\",\n },\n )\n eval_accumulation_steps: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"Number of steps to accumulate gradients at evaluation.\"\n \"If None eval_accumulation_steps equals to gradient_accumulation_steps\",\n },\n )\n eval_delay: int = field(\n default=0,\n metadata={\n \"help\": \"Number of epochs or steps to wait for before the first \"\n \"evaluation can be performed, depending on the evaluation_strategy\"\n },\n )\n eval_steps: Optional[int] = field(\n default=1_000, metadata={\"help\": \"Number of update steps between two evaluations\"}\n )\n warmup_steps: int = field(\n default=1_000,\n metadata={\n \"help\": \"Number of steps to warm up\",\n },\n )\n max_steps: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"Maximum number of training steps\",\n },\n )\n num_train_epochs: int = field(\n default=1,\n metadata={\n \"help\": \"Number of training epochs\",\n },\n )\n learning_rate: float = field(\n default=2e-4,\n metadata={\n \"help\": \"Learning rate value\",\n },\n )\n max_grad_norm: float = field(\n default=1.0,\n metadata={\n \"help\": \"Clip grad value\",\n },\n )\n weight_decay: float = field(\n default=0.001,\n metadata={\n \"help\": \"Weight decay value\",\n },\n )\n label_smoothing_factor: float = field(\n default=0.0,\n metadata={\n \"help\": \"Label smoothing value\",\n },\n )\n logging_steps: int = field(\n default=10,\n metadata={\n \"help\": \"Number of steps between logging\",\n },\n )\n save_steps: int = field(\n default=100,\n metadata={\n \"help\": \"The number of training steps between saving the checkpoint and uploading to the hub\",\n },\n )\n save_total_limit: int = field(\n default=1,\n metadata={\n \"help\": \"The number of checkpoints that are saved locally\",\n },\n )\n optim: Optional[str] = field(\n default=\"paged_adamw_8bit\",\n metadata={\n \"help\": \"Optimizer name. It will be overwritten if you use deepspeed\",\n },\n )\n push_to_hub: bool = field(\n default=False,\n metadata={\n \"help\": \"Upload the model to the hub. \"\n \"The model will be uploaded to the hub every save_steps. \"\n \"If LoRA is used, then LoRA's weights will be loaded onto the hub\",\n },\n )\n hub_model_id: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"The name of the model at the hub. Example: BobaZooba/Shurale\",\n },\n )\n hub_private_repo: bool = field(\n default=True,\n metadata={\n \"help\": \"Private repository or not\",\n },\n )\n neftune_noise_alpha: Optional[float] = field(\n default=None,\n metadata={\n \"help\": \"If not None, this will activate NEFTune noise embeddings. \"\n \"This can drastically improve model performance for instruction fine-tuning\",\n },\n )\n\n # training traction\n project_name: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Project name for training traction services like W&B\",\n },\n )\n report_to_wandb: bool = field(\n default=False,\n metadata={\n \"help\": \"Report or not to Weight & Biases\",\n },\n )\n wandb_api_key: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Weight & Biases API key. You can also set this key using .env file\",\n },\n )\n wandb_project: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Depreacted, use project_name. 
Weight & Biases project name\",\n },\n )\n wandb_entity: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Weight & Biases entity name (user or company)\",\n },\n )\n\n def __post_init__(self):\n if self.huggingface_hub_token is not None:\n os.environ[enums.EnvironmentVariables.huggingface_hub_token] = self.huggingface_hub_token\n dist_logger(message=f\"Environment variable {enums.EnvironmentVariables.huggingface_hub_token} set\")\n\n if self.report_to_wandb:\n for key, value in zip(\n [\n enums.EnvironmentVariables.wandb_api_key,\n enums.EnvironmentVariables.wandb_project,\n enums.EnvironmentVariables.wandb_entity,\n ],\n [\n self.wandb_api_key,\n self.correct_project_name,\n self.wandb_entity,\n ],\n ):\n if value is not None:\n os.environ[key] = value\n dist_logger(message=f\"Environment variable {key} set\")\n else:\n os.environ[enums.EnvironmentVariables.wandb_disabled] = \"true\"\n\n @property\n def correct_project_name(self) -> Optional[str]:\n if self.project_name is not None and self.wandb_project is not None:\n dist_logger.warning(\n message=\"You set both project_name and wandb_project.\"\n \"Priority set to project_name for experiment tracking\"\n )\n return self.project_name\n elif self.project_name is not None:\n return self.project_name\n elif self.wandb_project is not None:\n dist_logger.warning(message=\"wandb_project is depreacted, please use project_name instead\")\n return self.wandb_project\n else:\n return None\n\n def check_hub(self) -> None:\n if self.push_to_hub and self.hub_model_id is None:\n raise ValueError(\"You want to push to HF hub, but hub_model_id is None\")\n elif self.hub_model_id is not None and not self.push_to_hub:\n dist_logger.warning(\"You set hub_model_id, but push_to_hub is False\")\n\n return None\n\n def apply_deepspeed_single_gpu(self) -> None:\n os.environ[enums.EnvironmentVariables.master_address] = \"localhost\"\n os.environ[enums.EnvironmentVariables.master_port] = str(self.master_port)\n os.environ[enums.EnvironmentVariables.rank] = \"0\"\n os.environ[enums.EnvironmentVariables.local_rank] = \"0\"\n os.environ[enums.EnvironmentVariables.world_size] = \"1\"\n\n def check_deepspeed(self) -> None:\n if self.deepspeed is not None:\n spec = find_spec(\"deepspeed\")\n\n if spec is None:\n raise ImportError(\"Deepspeed is not None, but failed to import deepspeed. Please install deepspeed.\")\n\n if self.single_gpu:\n self.apply_deepspeed_single_gpu()\n\n return None\n\n def check_flash_attention(self) -> None:\n if self.use_flash_attention_2:\n if not torch.cuda.is_available():\n raise ImportError(\"You want to use flash_attention_2, but CUDA is not available\")\n\n spec = find_spec(\"flash_attn\")\n\n if spec is None:\n raise ImportError(\n \"You want to use flash_attention_2, but flash-attn is not installed. Please install flash-attn.\"\n )\n\n return None\n\n def check_auto_gptq(self) -> None:\n spec = find_spec(\"auto_gptq\")\n\n if spec is None:\n raise ImportError(\n \"You want to quantize model using GPTQ, but auto-gptq is not installed. 
Please install auto-gptq.\"\n )\n\n return None\n\n def check(self) -> None:\n \"\"\"\n Performs a series of checks to validate the configuration for compatibility with the training environment.\n\n This method is responsible for ensuring that the environment is properly set up for the actions specified in\n the configuration object, such as pushing to Hugging Face's hub, using deepspeed, and using flash attention.\n\n It includes the following checks:\n - Verifies that credentials for Hugging Face hub are provided if the model is intended to be pushed to the hub.\n - Validates that deepspeed is installed if it is specified in the configuration.\n - Ensures that the necessary packages are installed for using flash attention if configured to do so.\n\n Does not return any value.\n\n Raises:\n - ValueError: If the configuration for hub interaction is incorrect.\n - ImportError: If any of the required libraries (e.g., deepspeed, flash-attn, auto-gptq) are not installed.\n\n Example usage:\n ```python\n from xllm import Config\n\n config = Config(...)\n # Before proceeding with training or other operations, run checks to ensure environment compatibility.\n config.check()\n ```\n\n Note:\n - Always invoke this method after initializing a `Config` object and before proceeding with model training\n or other operations that rely on the configuration settings.\n \"\"\"\n self.check_hub()\n self.check_deepspeed()\n self.check_flash_attention()\n\n return None\n\n @property\n def correct_tokenizer_name_or_path(self) -> str:\n \"\"\"\n Resolves the tokenizer name or path to be used for initializing the tokenizer.\n\n This property ensures that if a specific tokenizer name or path is not provided in the configuration object,\n the model name or path is used instead, maintaining consistency between model and tokenizer.\n\n Returns:\n `str`: The name or path of the tokenizer to be used. If `tokenizer_name_or_path` is specified in `Config`\n object, that value is used. Otherwise, `model_name_or_path` is returned as the default tokenizer identifier.\n\n Example usage:\n ```python\n from xllm import Config\n\n config = Config(model_name_or_path=\"gpt2\", tokenizer_name_or_path=None)\n tokenizer_name_or_path = config.correct_tokenizer_name_or_path\n # tokenizer_name_or_path now holds the value \"gpt2\"\n ```\n\n Note:\n - It is a common practice to use the same identifier for both the model and its corresponding tokenizer.\n This property handles such a case automatically when the `tokenizer_name_or_path` is not explicitly set.\n \"\"\"\n if self.tokenizer_name_or_path is not None:\n return self.tokenizer_name_or_path\n else:\n return self.model_name_or_path\n\n @property\n def lora_target_modules(self) -> Optional[List[str]]:\n \"\"\"\n Interprets the LoRA target modules setting from the configuration to determine which model modules to apply\n LoRA to.\n\n LoRA (Low-Rank Adaptation) is a parameter-efficient training method that modifies specific layers within a\n model. 
This property is responsible for parsing the `raw_lora_target_modules` configuration to identify\n the specific modules (like attention key, query, and value matrices) that LoRA will be applied to.\n\n Returns:\n Optional[List[str]]: A list of module names to apply LoRA to if specified, otherwise `None` if LoRA should\n be applied to all eligible modules as determined by the string \"all\" in `raw_lora_target_modules`.\n\n Raises:\n ValueError: If `raw_lora_target_modules` is not set.\n\n Example usage:\n ```python\n from xllm import Config\n\n # Assuming a Config object with LoRA targets specified.\n config = Config(raw_lora_target_modules=\"k,q,v\")\n lora_modules = config.lora_target_modules\n # lora_modules now holds the list ['k', 'q', 'v'].\n ```\n\n Note:\n - The `raw_lora_target_modules` should be provided as a comma-separated string specifying the target\n modules. If LoRA should be applied broadly, the value \"all\" can be used.\n \"\"\"\n if self.raw_lora_target_modules == \"all\":\n return None\n elif self.raw_lora_target_modules is not None:\n modules_names = [module_name.strip() for module_name in self.raw_lora_target_modules.split(\",\")]\n return modules_names\n else:\n raise ValueError(\"raw_lora_target_modules doesn't set\")\n\n @property\n def dtype(self) -> torch.dtype:\n \"\"\"\n Determines the appropriate PyTorch data type for the model based on availability of CUDA and configuration\n settings.\n\n This property assists in setting computational precision for training and inference (e.g., FP32, FP16, BF16),\n basing the decision on system capabilities and user preferences as defined in the `Config` object. The selected\n data type can impact both the computational efficiency and memory usage of the model operations.\n\n Returns:\n `torch.dtype`: The data type to be used for the model tensors. This can be one of the following based on the\n system's CUDA support and configuration flags: `torch.float32` (FP32), `torch.float16` (FP16), or\n `torch.bfloat16` (BF16).\n\n Example usage:\n ```python\n from xllm import Config\n\n config = Config(force_fp32=False, force_fp16=True)\n model_dtype = config.dtype\n # If CUDA is available and BF16 is supported, model_dtype will be `torch.bfloat16`.\n # Otherwise, it falls back to `torch.float16` due to the forced FP16 configuration.\n ```\n\n Note:\n - This property plays a critical role in memory management and computational efficiency, especially when\n working with large models or limited system resources.\n \"\"\"\n if not torch.cuda.is_available() or self.force_fp32:\n return torch.float32\n elif self.force_fp16:\n return torch.float16\n elif torch.cuda.is_bf16_supported():\n return torch.bfloat16\n else:\n return torch.float16\n\n @property\n def deepspeed(self) -> Optional[Dict[str, Any]]:\n \"\"\"\n Retrieves the deepspeed configuration dictionary based on settings within the `Config` object.\n\n This property parses the deepspeed settings from the configuration to construct the configuration dictionary\n used for ing up deepspeed in the model's training environment. 
It determines whether a predefined stage\n or a custom configuration file path should be utilized.\n\n Returns:\n `Optional[Dict[str, Any]]`: A dictionary containing deepspeed configurations, or `None` if deepspeed is not\n to be used.\n\n Raises:\n ValueError: If the `deepspeed_stage` specified does not correspond to a known configuration,\n or if a custom deepspeed configuration file path does not exist.\n\n Example usage:\n ```python\n from xllm import Config\n\n # Assuming a predefined Config object with deepspeed specifications.\n config = Config(deepspeed_stage=\"2\")\n ds_config = config.deepspeed\n # ds_config now contains the deepspeed configuration for stage 2.\n ```\n\n Note:\n - A deepspeed stage is a set of predefined configurations. If this is set, the corresponding configuration\n will be used and any custom deepspeed configuration file will be ignored.\n - If a custom deepspeed configuration file path is given and it exists, that configuration will be loaded\n and used.\n \"\"\"\n deepspeed_config: Optional[Dict[str, Any]] = None\n\n if self.deepspeed_config_path is not None:\n if os.path.isfile(self.deepspeed_config_path):\n with open(self.deepspeed_config_path) as file_object:\n deepspeed_config = json.load(file_object)\n return deepspeed_config\n else:\n raise ValueError(f\"deepspeed_config_path set to {self.deepspeed_config_path}, but not found\")\n\n if self.deepspeed_stage in [0, \"0\", \"stage_0\"]:\n return None\n\n if self.deepspeed_stage is not None:\n deepspeed_config = DS_CONFIG_MAPPER.get(self.deepspeed_stage, None)\n if deepspeed_config is None:\n raise ValueError(\n f'Deepspeed stage \"{self.deepspeed_stage}\" not found in keys: {list(DS_CONFIG_MAPPER.keys())}'\n )\n\n return deepspeed_config\n\n @property\n def fsdp(self) -> Union[str, List[str]]:\n \"\"\"\n Compiles the configurations for Fully Sharded Data Parallel (FSDP) based on the settings in the `Config` object.\n\n This property creates a list containing FSDP-related options, which informs the training process whether to\n enable FSDP and which FSDP strategy to employ.\n\n A list of options (fsdp_strategy) along the following:\n \"full_shard\": Shard parameters, gradients and optimizer states.\n \"shard_grad_op\": Shard optimizer states and gradients.\n \"offload\": Offload parameters and gradients to CPUs (only compatible with \"full_shard\" and \"shard_grad_op\").\n \"auto_wrap\": Automatically recursively wrap layers with FSDP using default_auto_wrap_policy.\n\n Returns:\n `Union[str, List[str]]`: A list of FSDP options as strings. 
It can be an empty string if FSDP is not used or\n a list with the specified FSDP strategy and options such as offloading.\n\n Example usage:\n ```python\n from xllm import Config\n\n # Assuming a predefined Config object with FSDP specifications.\n config = Config(fsdp_strategy=\"full_shard\", fsdp_offload=True)\n fsdp_options = config.fsdp\n ```\n\n Note:\n - FSDP strategies and options improve memory efficiency during distributed training by sharding the model's\n parameters across multiple devices.\n - The FSDP settings in the configuration should match the target training environment and system\n capabilities.\n \"\"\"\n fsdp_options = list()\n\n if self.fsdp_strategy is not None and self.fsdp_strategy != \"\":\n fsdp_options.append(self.fsdp_strategy)\n else:\n return \"\"\n\n if self.fsdp_offload:\n fsdp_options.append(FSDPOption.OFFLOAD)\n\n return fsdp_options\n\n @property\n def lora_model_name_or_path_for_fusing(self) -> str:\n \"\"\"\n Determines the name or path of the LoRA model to be used for the fusing process.\n\n This property resolves which model should be fused by checking whether a model ID from the Hugging Face hub or a\n local path to a LoRA model is provided in the configuration object. It is essential for the fusing operation\n when LoRA weights need to be integrated into the base model.\n\n Returns:\n `str`: The Hugging Face hub model ID or the local file path to the LoRA model, depending on which is\n specified.\n\n Raises:\n ValueError: If neither `lora_hub_model_id` nor `lora_model_local_path` is set, indicating that there is no\n model specified for fusing.\n\n Example usage:\n ```python\n from xllm import Config\n\n # Assuming a Config object with a specified LoRA model on Hugging Face Hub or locally.\n config = Config(lora_hub_model_id=\"username/model-id\", lora_model_local_path=None)\n model_name_or_path = config.lora_model_name_or_path_for_fusing\n # model_name_or_path will hold the value \"username/model-id\".\n ```\n\n Note:\n - This property is specifically used during the model fusing step and should be configured correctly in\n scenarios where LoRA is utilized.\n \"\"\"\n if self.lora_hub_model_id is not None:\n return self.lora_hub_model_id\n elif self.lora_model_local_path is not None:\n return self.lora_model_local_path\n else:\n raise ValueError(\"Please set lora_hub_model_id or lora_model_local_path for fusing\")\n\n @property\n def need_to_prepare_model_for_kbit_training(self) -> bool:\n if self.prepare_model_for_kbit_training is not None:\n return self.prepare_model_for_kbit_training\n else:\n return self.from_gptq or self.load_in_4bit or self.load_in_8bit" }, { "identifier": "SodaDataset", "path": "src/xllm/datasets/soda.py", "snippet": "class SodaDataset(BaseDataset):\n HEADER_KEY = \"header\"\n DIALOG_KEY = \"dialog\"\n\n _HF_DATASET_ID = \"allenai/soda\"\n\n def __init__(self, data: List[RawSample], header_drop_probability: float = 0.05):\n super().__init__(data=data)\n self.header_drop_probability = header_drop_probability\n\n @classmethod\n def get_data(cls, config: Config) -> Optional[Tuple[List[RawSample], Optional[List[RawSample]]]]:\n soda_dataset = datasets.load_dataset(cls._HF_DATASET_ID)\n\n parsed_data: Dict[str, List[RawSample]] = dict()\n\n known_indices = set()\n\n for split in [\"train\", \"test\"]:\n parsed_data[split] = list()\n\n for sample in tqdm(soda_dataset[split], desc=f\"Parsing SODA {split}\"):\n index = sample.get(\"original_index\")\n\n if index in known_indices:\n continue\n\n parsed_sample = {\n cls.HEADER_KEY: 
sample.get(\"narrative\"),\n cls.DIALOG_KEY: [\n f\"{speaker}: {phrase}\"\n for speaker, phrase in zip(sample.get(\"speakers\"), sample.get(\"dialogue\"))\n ],\n }\n\n parsed_data[split].append(parsed_sample)\n known_indices.add(index)\n\n train = parsed_data[\"train\"]\n valid = parsed_data[\"test\"]\n\n return train, valid\n\n def get_sample(self, index: int) -> RawSample:\n sample = self.data[index]\n\n dialog = sample[self.DIALOG_KEY]\n\n phrases = list()\n\n if not isinstance(dialog, list):\n raise ValueError(f\"{self.DIALOG_KEY} of sample is not a list: {type(dialog)}\")\n\n for phrase in dialog:\n if isinstance(phrase, str):\n phrases.append(phrase)\n\n if self.HEADER_KEY in sample:\n header = sample[self.HEADER_KEY]\n\n is_drop_header = np.random.rand() <= self.header_drop_probability\n\n if not is_drop_header and isinstance(header, str):\n phrases.insert(0, header)\n\n sample = {enums.General.text_parts: [phrase.replace(\"\\n\", \" \").replace(\"\\r\", \" \") for phrase in phrases]}\n\n return sample" }, { "identifier": "FALCON_TOKENIZER_DIR", "path": "tests/helpers/constants.py", "snippet": "FALCON_TOKENIZER_DIR: str = os.path.join(TOKENIZERS_DIR, \"falcon/\")" }, { "identifier": "LLAMA_TOKENIZER_DIR", "path": "tests/helpers/constants.py", "snippet": "LLAMA_TOKENIZER_DIR: str = os.path.join(TOKENIZERS_DIR, \"llama/\")" }, { "identifier": "LORA_FOR_LLAMA_DEFAULT_TARGET_MODULES", "path": "tests/helpers/constants.py", "snippet": "LORA_FOR_LLAMA_DEFAULT_TARGET_MODULES = [\n \"q_proj\",\n \"k_proj\",\n \"v_proj\",\n \"o_proj\",\n \"gate_proj\",\n \"up_proj\",\n \"down_proj\",\n]" }, { "identifier": "DATA", "path": "tests/helpers/dummy_data.py", "snippet": "DATA = [\n {\n enums.General.text_parts: [\n \"Person 1: Hello\",\n \"Person 2: It's me\",\n \"Person 1: I was wondering\",\n ]\n },\n {\n enums.General.text_parts: [\n \"You are a sith lord\",\n \"Kenobi: Hello there\",\n \"General Grievous: General Kenobi\",\n ]\n },\n]" }, { "identifier": "SODA_DATASET", "path": "tests/helpers/dummy_data.py", "snippet": "SODA_DATASET = [\n {\n SodaDataset.HEADER_KEY: \"This is dialog\",\n SodaDataset.DIALOG_KEY: [\"Hello\", \"It's me\", \"I was wondering\"],\n },\n {\n SodaDataset.HEADER_KEY: \"This is another dialog\",\n SodaDataset.DIALOG_KEY: [\"Sup\", \"Hello\", \"It's me\", \"I was wondering\", \"Please buy some coins\"],\n },\n]" } ]
import json
import os

import pytest
from _pytest.tmpdir import TempPathFactory
from peft import LoraConfig, PeftModel, get_peft_model
from transformers import (
    AutoTokenizer,
    FalconConfig,
    FalconForCausalLM,
    LlamaConfig,
    LlamaForCausalLM,
    PreTrainedTokenizer,
    TrainingArguments,
)

from src.xllm import enums
from src.xllm.collators.lm import LMCollator
from src.xllm.core.config import Config
from src.xllm.datasets.soda import SodaDataset
from tests.helpers.constants import (
    FALCON_TOKENIZER_DIR,
    LLAMA_TOKENIZER_DIR,
    LORA_FOR_LLAMA_DEFAULT_TARGET_MODULES,
)
from tests.helpers.dummy_data import DATA, SODA_DATASET
13,723
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


@pytest.fixture(scope="session")
def llama_tokenizer() -> PreTrainedTokenizer:
    tokenizer = AutoTokenizer.from_pretrained(LLAMA_TOKENIZER_DIR)
    tokenizer.pad_token = tokenizer.eos_token
    return tokenizer


@pytest.fixture(scope="session")
def llama_model_config(llama_tokenizer: PreTrainedTokenizer) -> LlamaConfig:
    config = LlamaConfig(
        vocab_size=len(llama_tokenizer),
        hidden_size=8,
        intermediate_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        max_position_embeddings=32,
    )
    return config


@pytest.fixture(scope="session")
def llama_model(llama_model_config: LlamaConfig) -> LlamaForCausalLM:
    model = LlamaForCausalLM(config=llama_model_config)
    return model


@pytest.fixture(scope="session")
def llama_lora_config() -> LoraConfig:
    lora_config = LoraConfig(
        r=2,
        target_modules=LORA_FOR_LLAMA_DEFAULT_TARGET_MODULES,
        bias="none",
        task_type="CAUSAL_LM",
        lora_alpha=8,
        lora_dropout=0.1,
    )
    return lora_config


@pytest.fixture(scope="session")
def llama_lora_model(llama_model: LlamaForCausalLM, llama_lora_config: LoraConfig) -> PeftModel:
    llama_model = get_peft_model(model=llama_model, peft_config=llama_lora_config)
    return llama_model


@pytest.fixture(scope="session")
def falcon_tokenizer() -> PreTrainedTokenizer:
    tokenizer = AutoTokenizer.from_pretrained(FALCON_TOKENIZER_DIR)
    tokenizer.pad_token = tokenizer.eos_token
    return tokenizer


@pytest.fixture(scope="session")
def falcon_model_config(falcon_tokenizer: PreTrainedTokenizer) -> FalconConfig:
    config = FalconConfig(
        vocab_size=len(falcon_tokenizer),
        hidden_size=8,
        intermediate_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        max_position_embeddings=32,
    )
    return config


@pytest.fixture(scope="session")
def falcon_model(falcon_model_config: FalconConfig) -> FalconForCausalLM:
    model = FalconForCausalLM(config=falcon_model_config)
    return model


@pytest.fixture(scope="session")
def soda_dataset() -> SodaDataset:
    dataset = SodaDataset(data=SODA_DATASET)
    return dataset


@pytest.fixture(scope="session")
def llama_lm_collator(llama_tokenizer: PreTrainedTokenizer) -> LMCollator:
    collator = LMCollator(tokenizer=llama_tokenizer, max_length=32)
    return collator


@pytest.fixture(scope="session")
def path_to_outputs(tmp_path_factory: TempPathFactory) -> str:
    path = tmp_path_factory.mktemp("tmp") / "outputs/"
    return os.path.abspath(path)


@pytest.fixture(scope="session")
def training_arguments(path_to_outputs: str) -> TrainingArguments:
    arguments = TrainingArguments(
        output_dir=path_to_outputs,
        per_device_train_batch_size=2,
        gradient_accumulation_steps=2,
        warmup_steps=50,
        learning_rate=2e-4,
        max_steps=500,
        num_train_epochs=1,
        weight_decay=0.001,
        max_grad_norm=1.0,
        label_smoothing_factor=0.1,
        logging_steps=10,
        save_strategy="steps",
        save_steps=100,
        save_total_limit=1,
        hub_strategy="checkpoint",
        push_to_hub=False,
        save_safetensors=True,
        remove_unused_columns=False,
# Copyright 2023 Boris Zubarev. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. @pytest.fixture(scope="session") def llama_tokenizer() -> PreTrainedTokenizer: tokenizer = AutoTokenizer.from_pretrained(LLAMA_TOKENIZER_DIR) tokenizer.pad_token = tokenizer.eos_token return tokenizer @pytest.fixture(scope="session") def llama_model_config(llama_tokenizer: PreTrainedTokenizer) -> LlamaConfig: config = LlamaConfig( vocab_size=len(llama_tokenizer), hidden_size=8, intermediate_size=32, num_hidden_layers=2, num_attention_heads=2, max_position_embeddings=32, ) return config @pytest.fixture(scope="session") def llama_model(llama_model_config: LlamaConfig) -> LlamaForCausalLM: model = LlamaForCausalLM(config=llama_model_config) return model @pytest.fixture(scope="session") def llama_lora_config() -> LoraConfig: lora_config = LoraConfig( r=2, target_modules=LORA_FOR_LLAMA_DEFAULT_TARGET_MODULES, bias="none", task_type="CAUSAL_LM", lora_alpha=8, lora_dropout=0.1, ) return lora_config @pytest.fixture(scope="session") def llama_lora_model(llama_model: LlamaForCausalLM, llama_lora_config: LoraConfig) -> PeftModel: llama_model = get_peft_model(model=llama_model, peft_config=llama_lora_config) return llama_model @pytest.fixture(scope="session") def falcon_tokenizer() -> PreTrainedTokenizer: tokenizer = AutoTokenizer.from_pretrained(FALCON_TOKENIZER_DIR) tokenizer.pad_token = tokenizer.eos_token return tokenizer @pytest.fixture(scope="session") def falcon_model_config(falcon_tokenizer: PreTrainedTokenizer) -> FalconConfig: config = FalconConfig( vocab_size=len(falcon_tokenizer), hidden_size=8, intermediate_size=32, num_hidden_layers=2, num_attention_heads=2, max_position_embeddings=32, ) return config @pytest.fixture(scope="session") def falcon_model(falcon_model_config: FalconConfig) -> FalconForCausalLM: model = FalconForCausalLM(config=falcon_model_config) return model @pytest.fixture(scope="session") def soda_dataset() -> SodaDataset: dataset = SodaDataset(data=SODA_DATASET) return dataset @pytest.fixture(scope="session") def llama_lm_collator(llama_tokenizer: PreTrainedTokenizer) -> LMCollator: collator = LMCollator(tokenizer=llama_tokenizer, max_length=32) return collator @pytest.fixture(scope="session") def path_to_outputs(tmp_path_factory: TempPathFactory) -> str: path = tmp_path_factory.mktemp("tmp") / "outputs/" return os.path.abspath(path) @pytest.fixture(scope="session") def training_arguments(path_to_outputs: str) -> TrainingArguments: arguments = TrainingArguments( output_dir=path_to_outputs, per_device_train_batch_size=2, gradient_accumulation_steps=2, warmup_steps=50, learning_rate=2e-4, max_steps=500, num_train_epochs=1, weight_decay=0.001, max_grad_norm=1.0, label_smoothing_factor=0.1, logging_steps=10, save_strategy="steps", save_steps=100, save_total_limit=1, hub_strategy="checkpoint", push_to_hub=False, save_safetensors=True, remove_unused_columns=False,
log_level=enums.LogLevel.info,
0
2023-11-10 17:55:03+00:00
16k
AMAAI-Lab/mustango
diffusers/src/diffusers/pipelines/pipeline_flax_utils.py
[ { "identifier": "ConfigMixin", "path": "diffusers/src/diffusers/configuration_utils.py", "snippet": "class ConfigMixin:\n r\"\"\"\n Base class for all configuration classes. Stores all configuration parameters under `self.config` Also handles all\n methods for loading/downloading/saving classes inheriting from [`ConfigMixin`] with\n - [`~ConfigMixin.from_config`]\n - [`~ConfigMixin.save_config`]\n\n Class attributes:\n - **config_name** (`str`) -- A filename under which the config should stored when calling\n [`~ConfigMixin.save_config`] (should be overridden by parent class).\n - **ignore_for_config** (`List[str]`) -- A list of attributes that should not be saved in the config (should be\n overridden by subclass).\n - **has_compatibles** (`bool`) -- Whether the class has compatible classes (should be overridden by subclass).\n - **_deprecated_kwargs** (`List[str]`) -- Keyword arguments that are deprecated. Note that the init function\n should only have a `kwargs` argument if at least one argument is deprecated (should be overridden by\n subclass).\n \"\"\"\n config_name = None\n ignore_for_config = []\n has_compatibles = False\n\n _deprecated_kwargs = []\n\n def register_to_config(self, **kwargs):\n if self.config_name is None:\n raise NotImplementedError(f\"Make sure that {self.__class__} has defined a class name `config_name`\")\n # Special case for `kwargs` used in deprecation warning added to schedulers\n # TODO: remove this when we remove the deprecation warning, and the `kwargs` argument,\n # or solve in a more general way.\n kwargs.pop(\"kwargs\", None)\n for key, value in kwargs.items():\n try:\n setattr(self, key, value)\n except AttributeError as err:\n logger.error(f\"Can't set {key} with value {value} for {self}\")\n raise err\n\n if not hasattr(self, \"_internal_dict\"):\n internal_dict = kwargs\n else:\n previous_dict = dict(self._internal_dict)\n internal_dict = {**self._internal_dict, **kwargs}\n logger.debug(f\"Updating config from {previous_dict} to {internal_dict}\")\n\n self._internal_dict = FrozenDict(internal_dict)\n\n def save_config(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):\n \"\"\"\n Save a configuration object to the directory `save_directory`, so that it can be re-loaded using the\n [`~ConfigMixin.from_config`] class method.\n\n Args:\n save_directory (`str` or `os.PathLike`):\n Directory where the configuration JSON file will be saved (will be created if it does not exist).\n \"\"\"\n if os.path.isfile(save_directory):\n raise AssertionError(f\"Provided path ({save_directory}) should be a directory, not a file\")\n\n os.makedirs(save_directory, exist_ok=True)\n\n # If we save using the predefined names, we can load using `from_config`\n output_config_file = os.path.join(save_directory, self.config_name)\n\n self.to_json_file(output_config_file)\n logger.info(f\"Configuration saved in {output_config_file}\")\n\n @classmethod\n def from_config(cls, config: Union[FrozenDict, Dict[str, Any]] = None, return_unused_kwargs=False, **kwargs):\n r\"\"\"\n Instantiate a Python class from a config dictionary\n\n Parameters:\n config (`Dict[str, Any]`):\n A config dictionary from which the Python class will be instantiated. 
Make sure to only load\n configuration files of compatible classes.\n return_unused_kwargs (`bool`, *optional*, defaults to `False`):\n Whether kwargs that are not consumed by the Python class should be returned or not.\n\n kwargs (remaining dictionary of keyword arguments, *optional*):\n Can be used to update the configuration object (after it being loaded) and initiate the Python class.\n `**kwargs` will be directly passed to the underlying scheduler/model's `__init__` method and eventually\n overwrite same named arguments of `config`.\n\n Examples:\n\n ```python\n >>> from diffusers import DDPMScheduler, DDIMScheduler, PNDMScheduler\n\n >>> # Download scheduler from huggingface.co and cache.\n >>> scheduler = DDPMScheduler.from_pretrained(\"google/ddpm-cifar10-32\")\n\n >>> # Instantiate DDIM scheduler class with same config as DDPM\n >>> scheduler = DDIMScheduler.from_config(scheduler.config)\n\n >>> # Instantiate PNDM scheduler class with same config as DDPM\n >>> scheduler = PNDMScheduler.from_config(scheduler.config)\n ```\n \"\"\"\n # <===== TO BE REMOVED WITH DEPRECATION\n # TODO(Patrick) - make sure to remove the following lines when config==\"model_path\" is deprecated\n if \"pretrained_model_name_or_path\" in kwargs:\n config = kwargs.pop(\"pretrained_model_name_or_path\")\n\n if config is None:\n raise ValueError(\"Please make sure to provide a config as the first positional argument.\")\n # ======>\n\n if not isinstance(config, dict):\n deprecation_message = \"It is deprecated to pass a pretrained model name or path to `from_config`.\"\n if \"Scheduler\" in cls.__name__:\n deprecation_message += (\n f\"If you were trying to load a scheduler, please use {cls}.from_pretrained(...) instead.\"\n \" Otherwise, please make sure to pass a configuration dictionary instead. This functionality will\"\n \" be removed in v1.0.0.\"\n )\n elif \"Model\" in cls.__name__:\n deprecation_message += (\n f\"If you were trying to load a model, please use {cls}.load_config(...) followed by\"\n f\" {cls}.from_config(...) instead. Otherwise, please make sure to pass a configuration dictionary\"\n \" instead. This functionality will be removed in v1.0.0.\"\n )\n deprecate(\"config-passed-as-path\", \"1.0.0\", deprecation_message, standard_warn=False)\n config, kwargs = cls.load_config(pretrained_model_name_or_path=config, return_unused_kwargs=True, **kwargs)\n\n init_dict, unused_kwargs, hidden_dict = cls.extract_init_dict(config, **kwargs)\n\n # Allow dtype to be specified on initialization\n if \"dtype\" in unused_kwargs:\n init_dict[\"dtype\"] = unused_kwargs.pop(\"dtype\")\n\n # add possible deprecated kwargs\n for deprecated_kwarg in cls._deprecated_kwargs:\n if deprecated_kwarg in unused_kwargs:\n init_dict[deprecated_kwarg] = unused_kwargs.pop(deprecated_kwarg)\n\n # Return model and optionally state and/or unused_kwargs\n model = cls(**init_dict)\n\n # make sure to also save config parameters that might be used for compatible classes\n model.register_to_config(**hidden_dict)\n\n # add hidden kwargs of compatible classes to unused_kwargs\n unused_kwargs = {**unused_kwargs, **hidden_dict}\n\n if return_unused_kwargs:\n return (model, unused_kwargs)\n else:\n return model\n\n @classmethod\n def get_config_dict(cls, *args, **kwargs):\n deprecation_message = (\n f\" The function get_config_dict is deprecated. Please use {cls}.load_config instead. 
This function will be\"\n \" removed in version v1.0.0\"\n )\n deprecate(\"get_config_dict\", \"1.0.0\", deprecation_message, standard_warn=False)\n return cls.load_config(*args, **kwargs)\n\n @classmethod\n def load_config(\n cls,\n pretrained_model_name_or_path: Union[str, os.PathLike],\n return_unused_kwargs=False,\n return_commit_hash=False,\n **kwargs,\n ) -> Tuple[Dict[str, Any], Dict[str, Any]]:\n r\"\"\"\n Instantiate a Python class from a config dictionary\n\n Parameters:\n pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):\n Can be either:\n\n - A string, the *model id* of a model repo on huggingface.co. Valid model ids should have an\n organization name, like `google/ddpm-celebahq-256`.\n - A path to a *directory* containing model weights saved using [`~ConfigMixin.save_config`], e.g.,\n `./my_model_directory/`.\n\n cache_dir (`Union[str, os.PathLike]`, *optional*):\n Path to a directory in which a downloaded pretrained model configuration should be cached if the\n standard cache should not be used.\n force_download (`bool`, *optional*, defaults to `False`):\n Whether or not to force the (re-)download of the model weights and configuration files, overriding the\n cached versions if they exist.\n resume_download (`bool`, *optional*, defaults to `False`):\n Whether or not to delete incompletely received files. Will attempt to resume the download if such a\n file exists.\n proxies (`Dict[str, str]`, *optional*):\n A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',\n 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.\n output_loading_info(`bool`, *optional*, defaults to `False`):\n Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.\n local_files_only(`bool`, *optional*, defaults to `False`):\n Whether or not to only look at local files (i.e., do not try to download the model).\n use_auth_token (`str` or *bool*, *optional*):\n The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated\n when running `transformers-cli login` (stored in `~/.huggingface`).\n revision (`str`, *optional*, defaults to `\"main\"`):\n The specific model version to use. 
It can be a branch name, a tag name, or a commit id, since we use a\n git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any\n identifier allowed by git.\n subfolder (`str`, *optional*, defaults to `\"\"`):\n In case the relevant files are located inside a subfolder of the model repo (either remote in\n huggingface.co or downloaded locally), you can specify the folder name here.\n return_unused_kwargs (`bool`, *optional*, defaults to `False):\n Whether unused keyword arguments of the config shall be returned.\n return_commit_hash (`bool`, *optional*, defaults to `False):\n Whether the commit_hash of the loaded configuration shall be returned.\n\n <Tip>\n\n It is required to be logged in (`huggingface-cli login`) when you want to use private or [gated\n models](https://huggingface.co/docs/hub/models-gated#gated-models).\n\n </Tip>\n\n <Tip>\n\n Activate the special [\"offline-mode\"](https://huggingface.co/transformers/installation.html#offline-mode) to\n use this method in a firewalled environment.\n\n </Tip>\n \"\"\"\n cache_dir = kwargs.pop(\"cache_dir\", DIFFUSERS_CACHE)\n force_download = kwargs.pop(\"force_download\", False)\n resume_download = kwargs.pop(\"resume_download\", False)\n proxies = kwargs.pop(\"proxies\", None)\n use_auth_token = kwargs.pop(\"use_auth_token\", None)\n local_files_only = kwargs.pop(\"local_files_only\", False)\n revision = kwargs.pop(\"revision\", None)\n _ = kwargs.pop(\"mirror\", None)\n subfolder = kwargs.pop(\"subfolder\", None)\n user_agent = kwargs.pop(\"user_agent\", {})\n\n user_agent = {**user_agent, \"file_type\": \"config\"}\n user_agent = http_user_agent(user_agent)\n\n pretrained_model_name_or_path = str(pretrained_model_name_or_path)\n\n if cls.config_name is None:\n raise ValueError(\n \"`self.config_name` is not defined. Note that one should not load a config from \"\n \"`ConfigMixin`. 
Please make sure to define `config_name` in a class inheriting from `ConfigMixin`\"\n )\n\n if os.path.isfile(pretrained_model_name_or_path):\n config_file = pretrained_model_name_or_path\n elif os.path.isdir(pretrained_model_name_or_path):\n if os.path.isfile(os.path.join(pretrained_model_name_or_path, cls.config_name)):\n # Load from a PyTorch checkpoint\n config_file = os.path.join(pretrained_model_name_or_path, cls.config_name)\n elif subfolder is not None and os.path.isfile(\n os.path.join(pretrained_model_name_or_path, subfolder, cls.config_name)\n ):\n config_file = os.path.join(pretrained_model_name_or_path, subfolder, cls.config_name)\n else:\n raise EnvironmentError(\n f\"Error no file named {cls.config_name} found in directory {pretrained_model_name_or_path}.\"\n )\n else:\n try:\n # Load from URL or cache if already cached\n config_file = hf_hub_download(\n pretrained_model_name_or_path,\n filename=cls.config_name,\n cache_dir=cache_dir,\n force_download=force_download,\n proxies=proxies,\n resume_download=resume_download,\n local_files_only=local_files_only,\n use_auth_token=use_auth_token,\n user_agent=user_agent,\n subfolder=subfolder,\n revision=revision,\n )\n except RepositoryNotFoundError:\n raise EnvironmentError(\n f\"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier\"\n \" listed on 'https://huggingface.co/models'\\nIf this is a private repository, make sure to pass a\"\n \" token having permission to this repo with `use_auth_token` or log in with `huggingface-cli\"\n \" login`.\"\n )\n except RevisionNotFoundError:\n raise EnvironmentError(\n f\"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for\"\n \" this model name. Check the model page at\"\n f\" 'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.\"\n )\n except EntryNotFoundError:\n raise EnvironmentError(\n f\"{pretrained_model_name_or_path} does not appear to have a file named {cls.config_name}.\"\n )\n except HTTPError as err:\n raise EnvironmentError(\n \"There was a specific connection error when trying to load\"\n f\" {pretrained_model_name_or_path}:\\n{err}\"\n )\n except ValueError:\n raise EnvironmentError(\n f\"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it\"\n f\" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a\"\n f\" directory containing a {cls.config_name} file.\\nCheckout your internet connection or see how to\"\n \" run the library in offline mode at\"\n \" 'https://huggingface.co/docs/diffusers/installation#offline-mode'.\"\n )\n except EnvironmentError:\n raise EnvironmentError(\n f\"Can't load config for '{pretrained_model_name_or_path}'. If you were trying to load it from \"\n \"'https://huggingface.co/models', make sure you don't have a local directory with the same name. 
\"\n f\"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory \"\n f\"containing a {cls.config_name} file\"\n )\n\n try:\n # Load config dict\n config_dict = cls._dict_from_json_file(config_file)\n\n commit_hash = extract_commit_hash(config_file)\n except (json.JSONDecodeError, UnicodeDecodeError):\n raise EnvironmentError(f\"It looks like the config file at '{config_file}' is not a valid JSON file.\")\n\n if not (return_unused_kwargs or return_commit_hash):\n return config_dict\n\n outputs = (config_dict,)\n\n if return_unused_kwargs:\n outputs += (kwargs,)\n\n if return_commit_hash:\n outputs += (commit_hash,)\n\n return outputs\n\n @staticmethod\n def _get_init_keys(cls):\n return set(dict(inspect.signature(cls.__init__).parameters).keys())\n\n @classmethod\n def extract_init_dict(cls, config_dict, **kwargs):\n # 0. Copy origin config dict\n original_dict = dict(config_dict.items())\n\n # 1. Retrieve expected config attributes from __init__ signature\n expected_keys = cls._get_init_keys(cls)\n expected_keys.remove(\"self\")\n # remove general kwargs if present in dict\n if \"kwargs\" in expected_keys:\n expected_keys.remove(\"kwargs\")\n # remove flax internal keys\n if hasattr(cls, \"_flax_internal_args\"):\n for arg in cls._flax_internal_args:\n expected_keys.remove(arg)\n\n # 2. Remove attributes that cannot be expected from expected config attributes\n # remove keys to be ignored\n if len(cls.ignore_for_config) > 0:\n expected_keys = expected_keys - set(cls.ignore_for_config)\n\n # load diffusers library to import compatible and original scheduler\n diffusers_library = importlib.import_module(__name__.split(\".\")[0])\n\n if cls.has_compatibles:\n compatible_classes = [c for c in cls._get_compatibles() if not isinstance(c, DummyObject)]\n else:\n compatible_classes = []\n\n expected_keys_comp_cls = set()\n for c in compatible_classes:\n expected_keys_c = cls._get_init_keys(c)\n expected_keys_comp_cls = expected_keys_comp_cls.union(expected_keys_c)\n expected_keys_comp_cls = expected_keys_comp_cls - cls._get_init_keys(cls)\n config_dict = {k: v for k, v in config_dict.items() if k not in expected_keys_comp_cls}\n\n # remove attributes from orig class that cannot be expected\n orig_cls_name = config_dict.pop(\"_class_name\", cls.__name__)\n if orig_cls_name != cls.__name__ and hasattr(diffusers_library, orig_cls_name):\n orig_cls = getattr(diffusers_library, orig_cls_name)\n unexpected_keys_from_orig = cls._get_init_keys(orig_cls) - expected_keys\n config_dict = {k: v for k, v in config_dict.items() if k not in unexpected_keys_from_orig}\n\n # remove private attributes\n config_dict = {k: v for k, v in config_dict.items() if not k.startswith(\"_\")}\n\n # 3. Create keyword arguments that will be passed to __init__ from expected keyword arguments\n init_dict = {}\n for key in expected_keys:\n # if config param is passed to kwarg and is present in config dict\n # it should overwrite existing config dict key\n if key in kwargs and key in config_dict:\n config_dict[key] = kwargs.pop(key)\n\n if key in kwargs:\n # overwrite key\n init_dict[key] = kwargs.pop(key)\n elif key in config_dict:\n # use value from config dict\n init_dict[key] = config_dict.pop(key)\n\n # 4. Give nice warning if unexpected values have been passed\n if len(config_dict) > 0:\n logger.warning(\n f\"The config attributes {config_dict} were passed to {cls.__name__}, \"\n \"but are not expected and will be ignored. 
Please verify your \"\n f\"{cls.config_name} configuration file.\"\n )\n\n # 5. Give nice info if config attributes are initiliazed to default because they have not been passed\n passed_keys = set(init_dict.keys())\n if len(expected_keys - passed_keys) > 0:\n logger.info(\n f\"{expected_keys - passed_keys} was not found in config. Values will be initialized to default values.\"\n )\n\n # 6. Define unused keyword arguments\n unused_kwargs = {**config_dict, **kwargs}\n\n # 7. Define \"hidden\" config parameters that were saved for compatible classes\n hidden_config_dict = {k: v for k, v in original_dict.items() if k not in init_dict}\n\n return init_dict, unused_kwargs, hidden_config_dict\n\n @classmethod\n def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]):\n with open(json_file, \"r\", encoding=\"utf-8\") as reader:\n text = reader.read()\n return json.loads(text)\n\n def __repr__(self):\n return f\"{self.__class__.__name__} {self.to_json_string()}\"\n\n @property\n def config(self) -> Dict[str, Any]:\n \"\"\"\n Returns the config of the class as a frozen dictionary\n\n Returns:\n `Dict[str, Any]`: Config of the class.\n \"\"\"\n return self._internal_dict\n\n def to_json_string(self) -> str:\n \"\"\"\n Serializes this instance to a JSON string.\n\n Returns:\n `str`: String containing all the attributes that make up this configuration instance in JSON format.\n \"\"\"\n config_dict = self._internal_dict if hasattr(self, \"_internal_dict\") else {}\n config_dict[\"_class_name\"] = self.__class__.__name__\n config_dict[\"_diffusers_version\"] = __version__\n\n def to_json_saveable(value):\n if isinstance(value, np.ndarray):\n value = value.tolist()\n elif isinstance(value, PosixPath):\n value = str(value)\n return value\n\n config_dict = {k: to_json_saveable(v) for k, v in config_dict.items()}\n return json.dumps(config_dict, indent=2, sort_keys=True) + \"\\n\"\n\n def to_json_file(self, json_file_path: Union[str, os.PathLike]):\n \"\"\"\n Save this instance to a JSON file.\n\n Args:\n json_file_path (`str` or `os.PathLike`):\n Path to the JSON file in which this configuration instance's parameters will be saved.\n \"\"\"\n with open(json_file_path, \"w\", encoding=\"utf-8\") as writer:\n writer.write(self.to_json_string())" }, { "identifier": "FLAX_WEIGHTS_NAME", "path": "diffusers/src/diffusers/models/modeling_flax_utils.py", "snippet": "class FlaxModelMixin:\n def _from_config(cls, config, **kwargs):\n def _cast_floating_to(self, params: Union[Dict, FrozenDict], dtype: jnp.dtype, mask: Any = None) -> Any:\n def conditional_cast(param):\n def to_bf16(self, params: Union[Dict, FrozenDict], mask: Any = None):\n def to_fp32(self, params: Union[Dict, FrozenDict], mask: Any = None):\n def to_fp16(self, params: Union[Dict, FrozenDict], mask: Any = None):\n def init_weights(self, rng: jax.random.KeyArray) -> Dict:\n def from_pretrained(\n cls,\n pretrained_model_name_or_path: Union[str, os.PathLike],\n dtype: jnp.dtype = jnp.float32,\n *model_args,\n **kwargs,\n ):\n def save_pretrained(\n self,\n save_directory: Union[str, os.PathLike],\n params: Union[Dict, FrozenDict],\n is_main_process: bool = True,\n ):" }, { "identifier": "SCHEDULER_CONFIG_NAME", "path": "diffusers/src/diffusers/schedulers/scheduling_utils_flax.py", "snippet": "SCHEDULER_CONFIG_NAME = \"scheduler_config.json\"" }, { "identifier": "FlaxSchedulerMixin", "path": "diffusers/src/diffusers/schedulers/scheduling_utils_flax.py", "snippet": "class FlaxSchedulerMixin:\n \"\"\"\n Mixin containing common functions for 
the schedulers.\n\n Class attributes:\n - **_compatibles** (`List[str]`) -- A list of classes that are compatible with the parent class, so that\n `from_config` can be used from a class different than the one used to save the config (should be overridden\n by parent class).\n \"\"\"\n\n config_name = SCHEDULER_CONFIG_NAME\n ignore_for_config = [\"dtype\"]\n _compatibles = []\n has_compatibles = True\n\n @classmethod\n def from_pretrained(\n cls,\n pretrained_model_name_or_path: Dict[str, Any] = None,\n subfolder: Optional[str] = None,\n return_unused_kwargs=False,\n **kwargs,\n ):\n r\"\"\"\n Instantiate a Scheduler class from a pre-defined JSON-file.\n\n Parameters:\n pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):\n Can be either:\n\n - A string, the *model id* of a model repo on huggingface.co. Valid model ids should have an\n organization name, like `google/ddpm-celebahq-256`.\n - A path to a *directory* containing model weights saved using [`~SchedulerMixin.save_pretrained`],\n e.g., `./my_model_directory/`.\n subfolder (`str`, *optional*):\n In case the relevant files are located inside a subfolder of the model repo (either remote in\n huggingface.co or downloaded locally), you can specify the folder name here.\n return_unused_kwargs (`bool`, *optional*, defaults to `False`):\n Whether kwargs that are not consumed by the Python class should be returned or not.\n\n cache_dir (`Union[str, os.PathLike]`, *optional*):\n Path to a directory in which a downloaded pretrained model configuration should be cached if the\n standard cache should not be used.\n force_download (`bool`, *optional*, defaults to `False`):\n Whether or not to force the (re-)download of the model weights and configuration files, overriding the\n cached versions if they exist.\n resume_download (`bool`, *optional*, defaults to `False`):\n Whether or not to delete incompletely received files. Will attempt to resume the download if such a\n file exists.\n proxies (`Dict[str, str]`, *optional*):\n A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',\n 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.\n output_loading_info(`bool`, *optional*, defaults to `False`):\n Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.\n local_files_only(`bool`, *optional*, defaults to `False`):\n Whether or not to only look at local files (i.e., do not try to download the model).\n use_auth_token (`str` or *bool*, *optional*):\n The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated\n when running `transformers-cli login` (stored in `~/.huggingface`).\n revision (`str`, *optional*, defaults to `\"main\"`):\n The specific model version to use. 
It can be a branch name, a tag name, or a commit id, since we use a\n git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any\n identifier allowed by git.\n\n <Tip>\n\n It is required to be logged in (`huggingface-cli login`) when you want to use private or [gated\n models](https://huggingface.co/docs/hub/models-gated#gated-models).\n\n </Tip>\n\n <Tip>\n\n Activate the special [\"offline-mode\"](https://huggingface.co/transformers/installation.html#offline-mode) to\n use this method in a firewalled environment.\n\n </Tip>\n\n \"\"\"\n config, kwargs = cls.load_config(\n pretrained_model_name_or_path=pretrained_model_name_or_path,\n subfolder=subfolder,\n return_unused_kwargs=True,\n **kwargs,\n )\n scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)\n\n if hasattr(scheduler, \"create_state\") and getattr(scheduler, \"has_state\", False):\n state = scheduler.create_state()\n\n if return_unused_kwargs:\n return scheduler, state, unused_kwargs\n\n return scheduler, state\n\n def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):\n \"\"\"\n Save a scheduler configuration object to the directory `save_directory`, so that it can be re-loaded using the\n [`~FlaxSchedulerMixin.from_pretrained`] class method.\n\n Args:\n save_directory (`str` or `os.PathLike`):\n Directory where the configuration JSON file will be saved (will be created if it does not exist).\n \"\"\"\n self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)\n\n @property\n def compatibles(self):\n \"\"\"\n Returns all schedulers that are compatible with this scheduler\n\n Returns:\n `List[SchedulerMixin]`: List of compatible schedulers\n \"\"\"\n return self._get_compatibles()\n\n @classmethod\n def _get_compatibles(cls):\n compatible_classes_str = list(set([cls.__name__] + cls._compatibles))\n diffusers_library = importlib.import_module(__name__.split(\".\")[0])\n compatible_classes = [\n getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)\n ]\n return compatible_classes" }, { "identifier": "logging", "path": "diffusers/src/diffusers/utils/logging.py", "snippet": "def _get_default_logging_level():\ndef _get_library_name() -> str:\ndef _get_library_root_logger() -> logging.Logger:\ndef _configure_library_root_logger() -> None:\ndef _reset_library_root_logger() -> None:\ndef get_log_levels_dict():\ndef get_logger(name: Optional[str] = None) -> logging.Logger:\ndef get_verbosity() -> int:\ndef set_verbosity(verbosity: int) -> None:\ndef set_verbosity_info():\ndef set_verbosity_warning():\ndef set_verbosity_debug():\ndef set_verbosity_error():\ndef disable_default_handler() -> None:\ndef enable_default_handler() -> None:\ndef add_handler(handler: logging.Handler) -> None:\ndef remove_handler(handler: logging.Handler) -> None:\ndef disable_propagation() -> None:\ndef enable_propagation() -> None:\ndef enable_explicit_format() -> None:\ndef reset_format() -> None:\ndef warning_advice(self, *args, **kwargs):\n def __init__(self, *args, **kwargs): # pylint: disable=unused-argument\n def __iter__(self):\n def __getattr__(self, _):\n def empty_fn(*args, **kwargs): # pylint: disable=unused-argument\n def __enter__(self):\n def __exit__(self, type_, value, traceback):\n def __call__(self, *args, **kwargs):\n def set_lock(self, *args, **kwargs):\n def get_lock(self):\ndef is_progress_bar_enabled() -> bool:\ndef enable_progress_bar():\ndef 
disable_progress_bar():\nclass EmptyTqdm:\nclass _tqdm_cls:" }, { "identifier": "CONFIG_NAME", "path": "diffusers/src/diffusers/utils/constants.py", "snippet": "CONFIG_NAME = \"config.json\"" }, { "identifier": "DIFFUSERS_CACHE", "path": "diffusers/src/diffusers/utils/constants.py", "snippet": "DIFFUSERS_CACHE = default_cache_path" }, { "identifier": "http_user_agent", "path": "diffusers/src/diffusers/utils/hub_utils.py", "snippet": "def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:\n \"\"\"\n Formats a user-agent string with basic info about a request.\n \"\"\"\n ua = f\"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}\"\n if DISABLE_TELEMETRY or HF_HUB_OFFLINE:\n return ua + \"; telemetry/off\"\n if is_torch_available():\n ua += f\"; torch/{_torch_version}\"\n if is_flax_available():\n ua += f\"; jax/{_jax_version}\"\n ua += f\"; flax/{_flax_version}\"\n if is_onnx_available():\n ua += f\"; onnxruntime/{_onnxruntime_version}\"\n # CI will set this value to True\n if os.environ.get(\"DIFFUSERS_IS_CI\", \"\").upper() in ENV_VARS_TRUE_VALUES:\n ua += \"; is_ci/true\"\n if isinstance(user_agent, dict):\n ua += \"; \" + \"; \".join(f\"{k}/{v}\" for k, v in user_agent.items())\n elif isinstance(user_agent, str):\n ua += \"; \" + user_agent\n return ua" }, { "identifier": "is_transformers_available", "path": "diffusers/src/diffusers/utils/import_utils.py", "snippet": "def is_transformers_available():\n return _transformers_available" }, { "identifier": "BaseOutput", "path": "diffusers/src/diffusers/utils/outputs.py", "snippet": "class BaseOutput(OrderedDict):\n \"\"\"\n Base class for all model outputs as dataclass. Has a `__getitem__` that allows indexing by integer or slice (like a\n tuple) or strings (like a dictionary) that will ignore the `None` attributes. Otherwise behaves like a regular\n python dictionary.\n\n <Tip warning={true}>\n\n You can't unpack a `BaseOutput` directly. 
Use the [`~utils.BaseOutput.to_tuple`] method to convert it to a tuple\n before.\n\n </Tip>\n \"\"\"\n\n def __post_init__(self):\n class_fields = fields(self)\n\n # Safety and consistency checks\n if not len(class_fields):\n raise ValueError(f\"{self.__class__.__name__} has no fields.\")\n\n first_field = getattr(self, class_fields[0].name)\n other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])\n\n if other_fields_are_none and isinstance(first_field, dict):\n for key, value in first_field.items():\n self[key] = value\n else:\n for field in class_fields:\n v = getattr(self, field.name)\n if v is not None:\n self[field.name] = v\n\n def __delitem__(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.\")\n\n def setdefault(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.\")\n\n def pop(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``pop`` on a {self.__class__.__name__} instance.\")\n\n def update(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``update`` on a {self.__class__.__name__} instance.\")\n\n def __getitem__(self, k):\n if isinstance(k, str):\n inner_dict = dict(self.items())\n return inner_dict[k]\n else:\n return self.to_tuple()[k]\n\n def __setattr__(self, name, value):\n if name in self.keys() and value is not None:\n # Don't call self.__setitem__ to avoid recursion errors\n super().__setitem__(name, value)\n super().__setattr__(name, value)\n\n def __setitem__(self, key, value):\n # Will raise a KeyException if needed\n super().__setitem__(key, value)\n # Don't call self.__setattr__ to avoid recursion errors\n super().__setattr__(key, value)\n\n def to_tuple(self) -> Tuple[Any]:\n \"\"\"\n Convert self to a tuple containing all the attributes/keys that are not `None`.\n \"\"\"\n return tuple(self[k] for k in self.keys())" } ]
import importlib
import inspect
import os

import flax
import numpy as np
import PIL
from typing import Any, Dict, List, Optional, Union
from flax.core.frozen_dict import FrozenDict
from huggingface_hub import snapshot_download
from PIL import Image
from tqdm.auto import tqdm

from ..configuration_utils import ConfigMixin
from ..models.modeling_flax_utils import FLAX_WEIGHTS_NAME, FlaxModelMixin
from ..schedulers.scheduling_utils_flax import SCHEDULER_CONFIG_NAME, FlaxSchedulerMixin
from ..utils import CONFIG_NAME, DIFFUSERS_CACHE, BaseOutput, http_user_agent, is_transformers_available, logging
from transformers import FlaxPreTrainedModel
from diffusers import pipelines
from diffusers import pipelines
11,427
Instantiate a Flax diffusion pipeline from pre-trained pipeline weights. The pipeline is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). The warning *Weights from XXX not initialized from pretrained model* means that the weights of XXX do not come pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning task. The warning *Weights from XXX not used in YYY* means that the layer XXX is not used by YYY, therefore those weights are discarded. Parameters: pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*): Can be either: - A string, the *repo id* of a pretrained pipeline hosted inside a model repo on https://huggingface.co/ Valid repo ids have to be located under a user or organization name, like `CompVis/ldm-text2im-large-256`. - A path to a *directory* containing pipeline weights saved using [`~FlaxDiffusionPipeline.save_pretrained`], e.g., `./my_pipeline_directory/`. dtype (`str` or `jnp.dtype`, *optional*): Override the default `jnp.dtype` and load the model under this dtype. If `"auto"` is passed the dtype will be automatically derived from the model's weights. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. resume_download (`bool`, *optional*, defaults to `False`): Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. output_loading_info(`bool`, *optional*, defaults to `False`): Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. local_files_only(`bool`, *optional*, defaults to `False`): Whether or not to only look at local files (i.e., do not try to download the model). use_auth_token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. mirror (`str`, *optional*): Mirror source to accelerate downloads in China. If you are from China and have an accessibility problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or safety. Please refer to the mirror site for more information. specify the folder name here. kwargs (remaining dictionary of keyword arguments, *optional*): Can be used to overwrite load - and saveable variables - *i.e.* the pipeline components - of the specific pipeline class. The overwritten components are then directly passed to the pipelines `__init__` method. See example below for more information. 
<Tip> It is required to be logged in (`huggingface-cli login`) when you want to use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), *e.g.* `"runwayml/stable-diffusion-v1-5"` </Tip> <Tip> Activate the special ["offline-mode"](https://huggingface.co/diffusers/installation.html#offline-mode) to use this method in a firewalled environment. </Tip> Examples: ```py >>> from diffusers import FlaxDiffusionPipeline >>> # Download pipeline from huggingface.co and cache. >>> # Requires to be logged in to Hugging Face hub, >>> # see more in [the documentation](https://huggingface.co/docs/hub/security-tokens) >>> pipeline, params = FlaxDiffusionPipeline.from_pretrained( ... "runwayml/stable-diffusion-v1-5", ... revision="bf16", ... dtype=jnp.bfloat16, ... ) >>> # Download pipeline, but use a different scheduler >>> from diffusers import FlaxDPMSolverMultistepScheduler >>> model_id = "runwayml/stable-diffusion-v1-5" >>> dpmpp, dpmpp_state = FlaxDPMSolverMultistepScheduler.from_pretrained( ... model_id, ... subfolder="scheduler", ... ) >>> dpm_pipe, dpm_params = FlaxStableDiffusionPipeline.from_pretrained( ... model_id, revision="bf16", dtype=jnp.bfloat16, scheduler=dpmpp ... ) >>> dpm_params["scheduler"] = dpmpp_state ``` """ cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE) resume_download = kwargs.pop("resume_download", False) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", False) use_auth_token = kwargs.pop("use_auth_token", None) revision = kwargs.pop("revision", None) from_pt = kwargs.pop("from_pt", False) dtype = kwargs.pop("dtype", None) # 1. Download the checkpoints and configs # use snapshot download here to get it working from from_pretrained if not os.path.isdir(pretrained_model_name_or_path): config_dict = cls.load_config( pretrained_model_name_or_path, cache_dir=cache_dir, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, use_auth_token=use_auth_token, revision=revision, ) # make sure we only download sub-folders and `diffusers` filenames folder_names = [k for k in config_dict.keys() if not k.startswith("_")] allow_patterns = [os.path.join(k, "*") for k in folder_names]
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. if is_transformers_available(): INDEX_FILE = "diffusion_flax_model.bin" logger = logging.get_logger(__name__) LOADABLE_CLASSES = { "diffusers": { "FlaxModelMixin": ["save_pretrained", "from_pretrained"], "FlaxSchedulerMixin": ["save_pretrained", "from_pretrained"], "FlaxDiffusionPipeline": ["save_pretrained", "from_pretrained"], }, "transformers": { "PreTrainedTokenizer": ["save_pretrained", "from_pretrained"], "PreTrainedTokenizerFast": ["save_pretrained", "from_pretrained"], "FlaxPreTrainedModel": ["save_pretrained", "from_pretrained"], "FeatureExtractionMixin": ["save_pretrained", "from_pretrained"], "ProcessorMixin": ["save_pretrained", "from_pretrained"], "ImageProcessingMixin": ["save_pretrained", "from_pretrained"], }, } ALL_IMPORTABLE_CLASSES = {} for library in LOADABLE_CLASSES: ALL_IMPORTABLE_CLASSES.update(LOADABLE_CLASSES[library]) def import_flax_or_no_model(module, class_name): try: # 1. First make sure that if a Flax object is present, import this one class_obj = getattr(module, "Flax" + class_name) except AttributeError: # 2. If this doesn't work, it's not a model and we don't append "Flax" class_obj = getattr(module, class_name) except AttributeError: raise ValueError(f"Neither Flax{class_name} nor {class_name} exist in {module}") return class_obj @flax.struct.dataclass class FlaxImagePipelineOutput(BaseOutput): """ Output class for image pipelines. Args: images (`List[PIL.Image.Image]` or `np.ndarray`) List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width, num_channels)`. PIL images or numpy array present the denoised images of the diffusion pipeline. """ images: Union[List[PIL.Image.Image], np.ndarray] class FlaxDiffusionPipeline(ConfigMixin): r""" Base class for all models. [`FlaxDiffusionPipeline`] takes care of storing all components (models, schedulers, processors) for diffusion pipelines and handles methods for loading, downloading and saving models as well as a few methods common to all pipelines to: - enabling/disabling the progress bar for the denoising iteration Class attributes: - **config_name** ([`str`]) -- name of the config file that will store the class and module names of all components of the diffusion pipeline. """ config_name = "model_index.json" def register_modules(self, **kwargs): # import it here to avoid circular import for name, module in kwargs.items(): if module is None: register_dict = {name: (None, None)} else: # retrieve library library = module.__module__.split(".")[0] # check if the module is a pipeline module pipeline_dir = module.__module__.split(".")[-2] path = module.__module__.split(".") is_pipeline_module = pipeline_dir in path and hasattr(pipelines, pipeline_dir) # if library is not in LOADABLE_CLASSES, then it is a custom module. 
# Or if it's a pipeline module, then the module is inside the pipeline # folder so we set the library to module name. if library not in LOADABLE_CLASSES or is_pipeline_module: library = pipeline_dir # retrieve class_name class_name = module.__class__.__name__ register_dict = {name: (library, class_name)} # save model index config self.register_to_config(**register_dict) # set models setattr(self, name, module) def save_pretrained(self, save_directory: Union[str, os.PathLike], params: Union[Dict, FrozenDict]): # TODO: handle inference_state """ Save all variables of the pipeline that can be saved and loaded as well as the pipelines configuration file to a directory. A pipeline variable can be saved and loaded if its class implements both a save and loading method. The pipeline can easily be re-loaded using the `[`~FlaxDiffusionPipeline.from_pretrained`]` class method. Arguments: save_directory (`str` or `os.PathLike`): Directory to which to save. Will be created if it doesn't exist. """ self.save_config(save_directory) model_index_dict = dict(self.config) model_index_dict.pop("_class_name") model_index_dict.pop("_diffusers_version") model_index_dict.pop("_module", None) for pipeline_component_name in model_index_dict.keys(): sub_model = getattr(self, pipeline_component_name) if sub_model is None: # edge case for saving a pipeline with safety_checker=None continue model_cls = sub_model.__class__ save_method_name = None # search for the model's base class in LOADABLE_CLASSES for library_name, library_classes in LOADABLE_CLASSES.items(): library = importlib.import_module(library_name) for base_class, save_load_methods in library_classes.items(): class_candidate = getattr(library, base_class, None) if class_candidate is not None and issubclass(model_cls, class_candidate): # if we found a suitable base class in LOADABLE_CLASSES then grab its save method save_method_name = save_load_methods[0] break if save_method_name is not None: break save_method = getattr(sub_model, save_method_name) expects_params = "params" in set(inspect.signature(save_method).parameters.keys()) if expects_params: save_method( os.path.join(save_directory, pipeline_component_name), params=params[pipeline_component_name] ) else: save_method(os.path.join(save_directory, pipeline_component_name)) @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs): r""" Instantiate a Flax diffusion pipeline from pre-trained pipeline weights. The pipeline is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). The warning *Weights from XXX not initialized from pretrained model* means that the weights of XXX do not come pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning task. The warning *Weights from XXX not used in YYY* means that the layer XXX is not used by YYY, therefore those weights are discarded. Parameters: pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*): Can be either: - A string, the *repo id* of a pretrained pipeline hosted inside a model repo on https://huggingface.co/ Valid repo ids have to be located under a user or organization name, like `CompVis/ldm-text2im-large-256`. - A path to a *directory* containing pipeline weights saved using [`~FlaxDiffusionPipeline.save_pretrained`], e.g., `./my_pipeline_directory/`. dtype (`str` or `jnp.dtype`, *optional*): Override the default `jnp.dtype` and load the model under this dtype. 
If `"auto"` is passed the dtype will be automatically derived from the model's weights. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download of the model weights and configuration files, overriding the cached versions if they exist. resume_download (`bool`, *optional*, defaults to `False`): Whether or not to delete incompletely received files. Will attempt to resume the download if such a file exists. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. output_loading_info(`bool`, *optional*, defaults to `False`): Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages. local_files_only(`bool`, *optional*, defaults to `False`): Whether or not to only look at local files (i.e., do not try to download the model). use_auth_token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. mirror (`str`, *optional*): Mirror source to accelerate downloads in China. If you are from China and have an accessibility problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or safety. Please refer to the mirror site for more information. specify the folder name here. kwargs (remaining dictionary of keyword arguments, *optional*): Can be used to overwrite load - and saveable variables - *i.e.* the pipeline components - of the specific pipeline class. The overwritten components are then directly passed to the pipelines `__init__` method. See example below for more information. <Tip> It is required to be logged in (`huggingface-cli login`) when you want to use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), *e.g.* `"runwayml/stable-diffusion-v1-5"` </Tip> <Tip> Activate the special ["offline-mode"](https://huggingface.co/diffusers/installation.html#offline-mode) to use this method in a firewalled environment. </Tip> Examples: ```py >>> from diffusers import FlaxDiffusionPipeline >>> # Download pipeline from huggingface.co and cache. >>> # Requires to be logged in to Hugging Face hub, >>> # see more in [the documentation](https://huggingface.co/docs/hub/security-tokens) >>> pipeline, params = FlaxDiffusionPipeline.from_pretrained( ... "runwayml/stable-diffusion-v1-5", ... revision="bf16", ... dtype=jnp.bfloat16, ... ) >>> # Download pipeline, but use a different scheduler >>> from diffusers import FlaxDPMSolverMultistepScheduler >>> model_id = "runwayml/stable-diffusion-v1-5" >>> dpmpp, dpmpp_state = FlaxDPMSolverMultistepScheduler.from_pretrained( ... model_id, ... subfolder="scheduler", ... ) >>> dpm_pipe, dpm_params = FlaxStableDiffusionPipeline.from_pretrained( ... model_id, revision="bf16", dtype=jnp.bfloat16, scheduler=dpmpp ... 
) >>> dpm_params["scheduler"] = dpmpp_state ``` """ cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE) resume_download = kwargs.pop("resume_download", False) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", False) use_auth_token = kwargs.pop("use_auth_token", None) revision = kwargs.pop("revision", None) from_pt = kwargs.pop("from_pt", False) dtype = kwargs.pop("dtype", None) # 1. Download the checkpoints and configs # use snapshot download here to get it working from from_pretrained if not os.path.isdir(pretrained_model_name_or_path): config_dict = cls.load_config( pretrained_model_name_or_path, cache_dir=cache_dir, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, use_auth_token=use_auth_token, revision=revision, ) # make sure we only download sub-folders and `diffusers` filenames folder_names = [k for k in config_dict.keys() if not k.startswith("_")] allow_patterns = [os.path.join(k, "*") for k in folder_names]
allow_patterns += [FLAX_WEIGHTS_NAME, SCHEDULER_CONFIG_NAME, CONFIG_NAME, cls.config_name]
1
2023-11-14 23:29:31+00:00
16k
BraveGroup/Drive-WM
tests/pipelines/controlnet/test_controlnet_sdxl.py
[ { "identifier": "IMAGE_TO_IMAGE_IMAGE_PARAMS", "path": "tests/pipelines/pipeline_params.py", "snippet": "IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset([\"image\"])" }, { "identifier": "TEXT_TO_IMAGE_BATCH_PARAMS", "path": "tests/pipelines/pipeline_params.py", "snippet": "TEXT_TO_IMAGE_BATCH_PARAMS = frozenset([\"prompt\", \"negative_prompt\"])" }, { "identifier": "TEXT_TO_IMAGE_IMAGE_PARAMS", "path": "tests/pipelines/pipeline_params.py", "snippet": "TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([])" }, { "identifier": "TEXT_TO_IMAGE_PARAMS", "path": "tests/pipelines/pipeline_params.py", "snippet": "TEXT_TO_IMAGE_PARAMS = frozenset(\n [\n \"prompt\",\n \"height\",\n \"width\",\n \"guidance_scale\",\n \"negative_prompt\",\n \"prompt_embeds\",\n \"negative_prompt_embeds\",\n \"cross_attention_kwargs\",\n ]\n)" }, { "identifier": "PipelineKarrasSchedulerTesterMixin", "path": "tests/pipelines/test_pipelines_common.py", "snippet": "class PipelineKarrasSchedulerTesterMixin:\n \"\"\"\n This mixin is designed to be used with unittest.TestCase classes.\n It provides a set of common tests for each PyTorch pipeline that makes use of KarrasDiffusionSchedulers\n equivalence of dict and tuple outputs, etc.\n \"\"\"\n\n def test_karras_schedulers_shape(self):\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n\n # make sure that PNDM does not need warm-up\n pipe.scheduler.register_to_config(skip_prk_steps=True)\n\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n inputs = self.get_dummy_inputs(torch_device)\n inputs[\"num_inference_steps\"] = 2\n\n if \"strength\" in inputs:\n inputs[\"num_inference_steps\"] = 4\n inputs[\"strength\"] = 0.5\n\n outputs = []\n for scheduler_enum in KarrasDiffusionSchedulers:\n if \"KDPM2\" in scheduler_enum.name:\n inputs[\"num_inference_steps\"] = 5\n\n scheduler_cls = getattr(diffusers, scheduler_enum.name)\n pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)\n output = pipe(**inputs)[0]\n outputs.append(output)\n\n if \"KDPM2\" in scheduler_enum.name:\n inputs[\"num_inference_steps\"] = 2\n\n assert check_same_shape(outputs)" }, { "identifier": "PipelineLatentTesterMixin", "path": "tests/pipelines/test_pipelines_common.py", "snippet": "class PipelineLatentTesterMixin:\n \"\"\"\n This mixin is designed to be used with PipelineTesterMixin and unittest.TestCase classes.\n It provides a set of common tests for PyTorch pipeline that has vae, e.g.\n equivalence of different input and output types, etc.\n \"\"\"\n\n @property\n def image_params(self) -> frozenset:\n raise NotImplementedError(\n \"You need to set the attribute `image_params` in the child test class. \"\n \"`image_params` are tested for if all accepted input image types (i.e. `pt`,`pil`,`np`) are producing same results\"\n )\n\n @property\n def image_latents_params(self) -> frozenset:\n raise NotImplementedError(\n \"You need to set the attribute `image_latents_params` in the child test class. 
\"\n \"`image_latents_params` are tested for if passing latents directly are producing same results\"\n )\n\n def get_dummy_inputs_by_type(self, device, seed=0, input_image_type=\"pt\", output_type=\"np\"):\n inputs = self.get_dummy_inputs(device, seed)\n\n def convert_to_pt(image):\n if isinstance(image, torch.Tensor):\n input_image = image\n elif isinstance(image, np.ndarray):\n input_image = VaeImageProcessor.numpy_to_pt(image)\n elif isinstance(image, PIL.Image.Image):\n input_image = VaeImageProcessor.pil_to_numpy(image)\n input_image = VaeImageProcessor.numpy_to_pt(input_image)\n else:\n raise ValueError(f\"unsupported input_image_type {type(image)}\")\n return input_image\n\n def convert_pt_to_type(image, input_image_type):\n if input_image_type == \"pt\":\n input_image = image\n elif input_image_type == \"np\":\n input_image = VaeImageProcessor.pt_to_numpy(image)\n elif input_image_type == \"pil\":\n input_image = VaeImageProcessor.pt_to_numpy(image)\n input_image = VaeImageProcessor.numpy_to_pil(input_image)\n else:\n raise ValueError(f\"unsupported input_image_type {input_image_type}.\")\n return input_image\n\n for image_param in self.image_params:\n if image_param in inputs.keys():\n inputs[image_param] = convert_pt_to_type(\n convert_to_pt(inputs[image_param]).to(device), input_image_type\n )\n\n inputs[\"output_type\"] = output_type\n\n return inputs\n\n def test_pt_np_pil_outputs_equivalent(self, expected_max_diff=1e-4):\n self._test_pt_np_pil_outputs_equivalent(expected_max_diff=expected_max_diff)\n\n def _test_pt_np_pil_outputs_equivalent(self, expected_max_diff=1e-4, input_image_type=\"pt\"):\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n pipe = pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n output_pt = pipe(\n **self.get_dummy_inputs_by_type(torch_device, input_image_type=input_image_type, output_type=\"pt\")\n )[0]\n output_np = pipe(\n **self.get_dummy_inputs_by_type(torch_device, input_image_type=input_image_type, output_type=\"np\")\n )[0]\n output_pil = pipe(\n **self.get_dummy_inputs_by_type(torch_device, input_image_type=input_image_type, output_type=\"pil\")\n )[0]\n\n max_diff = np.abs(output_pt.cpu().numpy().transpose(0, 2, 3, 1) - output_np).max()\n self.assertLess(\n max_diff, expected_max_diff, \"`output_type=='pt'` generate different results from `output_type=='np'`\"\n )\n\n max_diff = np.abs(np.array(output_pil[0]) - (output_np * 255).round()).max()\n self.assertLess(max_diff, 2.0, \"`output_type=='pil'` generate different results from `output_type=='np'`\")\n\n def test_pt_np_pil_inputs_equivalent(self):\n if len(self.image_params) == 0:\n return\n\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n pipe = pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n out_input_pt = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type=\"pt\"))[0]\n out_input_np = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type=\"np\"))[0]\n out_input_pil = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type=\"pil\"))[0]\n\n max_diff = np.abs(out_input_pt - out_input_np).max()\n self.assertLess(max_diff, 1e-4, \"`input_type=='pt'` generate different result from `input_type=='np'`\")\n max_diff = np.abs(out_input_pil - out_input_np).max()\n self.assertLess(max_diff, 1e-2, \"`input_type=='pt'` generate different result from `input_type=='np'`\")\n\n def test_latents_input(self):\n if len(self.image_latents_params) == 
0:\n return\n\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)\n pipe = pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type=\"pt\"))[0]\n\n vae = components[\"vae\"]\n inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type=\"pt\")\n generator = inputs[\"generator\"]\n for image_param in self.image_latents_params:\n if image_param in inputs.keys():\n inputs[image_param] = (\n vae.encode(inputs[image_param]).latent_dist.sample(generator) * vae.config.scaling_factor\n )\n out_latents_inputs = pipe(**inputs)[0]\n\n max_diff = np.abs(out - out_latents_inputs).max()\n self.assertLess(max_diff, 1e-4, \"passing latents as image input generate different result from passing image\")" }, { "identifier": "PipelineTesterMixin", "path": "tests/pipelines/test_pipelines_common.py", "snippet": "class PipelineTesterMixin:\n \"\"\"\n This mixin is designed to be used with unittest.TestCase classes.\n It provides a set of common tests for each PyTorch pipeline, e.g. saving and loading the pipeline,\n equivalence of dict and tuple outputs, etc.\n \"\"\"\n\n # Canonical parameters that are passed to `__call__` regardless\n # of the type of pipeline. They are always optional and have common\n # sense default values.\n required_optional_params = frozenset(\n [\n \"num_inference_steps\",\n \"num_images_per_prompt\",\n \"generator\",\n \"latents\",\n \"output_type\",\n \"return_dict\",\n ]\n )\n\n # set these parameters to False in the child class if the pipeline does not support the corresponding functionality\n test_attention_slicing = True\n\n test_xformers_attention = True\n\n def get_generator(self, seed):\n device = torch_device if torch_device != \"mps\" else \"cpu\"\n generator = torch.Generator(device).manual_seed(seed)\n return generator\n\n @property\n def pipeline_class(self) -> Union[Callable, DiffusionPipeline]:\n raise NotImplementedError(\n \"You need to set the attribute `pipeline_class = ClassNameOfPipeline` in the child test class. \"\n \"See existing pipeline tests for reference.\"\n )\n\n def get_dummy_components(self):\n raise NotImplementedError(\n \"You need to implement `get_dummy_components(self)` in the child test class. \"\n \"See existing pipeline tests for reference.\"\n )\n\n def get_dummy_inputs(self, device, seed=0):\n raise NotImplementedError(\n \"You need to implement `get_dummy_inputs(self, device, seed)` in the child test class. \"\n \"See existing pipeline tests for reference.\"\n )\n\n @property\n def params(self) -> frozenset:\n raise NotImplementedError(\n \"You need to set the attribute `params` in the child test class. \"\n \"`params` are checked for if all values are present in `__call__`'s signature.\"\n \" You can set `params` using one of the common set of parameters defined in `pipeline_params.py`\"\n \" e.g., `TEXT_TO_IMAGE_PARAMS` defines the common parameters used in text to \"\n \"image pipelines, including prompts and prompt embedding overrides.\"\n \"If your pipeline's set of arguments has minor changes from one of the common sets of arguments, \"\n \"do not make modifications to the existing common sets of arguments. I.e. a text to image pipeline \"\n \"with non-configurable height and width arguments should set the attribute as \"\n \"`params = TEXT_TO_IMAGE_PARAMS - {'height', 'width'}`. 
\"\n \"See existing pipeline tests for reference.\"\n )\n\n @property\n def batch_params(self) -> frozenset:\n raise NotImplementedError(\n \"You need to set the attribute `batch_params` in the child test class. \"\n \"`batch_params` are the parameters required to be batched when passed to the pipeline's \"\n \"`__call__` method. `pipeline_params.py` provides some common sets of parameters such as \"\n \"`TEXT_TO_IMAGE_BATCH_PARAMS`, `IMAGE_VARIATION_BATCH_PARAMS`, etc... If your pipeline's \"\n \"set of batch arguments has minor changes from one of the common sets of batch arguments, \"\n \"do not make modifications to the existing common sets of batch arguments. I.e. a text to \"\n \"image pipeline `negative_prompt` is not batched should set the attribute as \"\n \"`batch_params = TEXT_TO_IMAGE_BATCH_PARAMS - {'negative_prompt'}`. \"\n \"See existing pipeline tests for reference.\"\n )\n\n @property\n def callback_cfg_params(self) -> frozenset:\n raise NotImplementedError(\n \"You need to set the attribute `callback_cfg_params` in the child test class that requires to run test_callback_cfg. \"\n \"`callback_cfg_params` are the parameters that needs to be passed to the pipeline's callback \"\n \"function when dynamically adjusting `guidance_scale`. They are variables that require special\"\n \"treatment when `do_classifier_free_guidance` is `True`. `pipeline_params.py` provides some common\"\n \" sets of parameters such as `TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS`. If your pipeline's \"\n \"set of cfg arguments has minor changes from one of the common sets of cfg arguments, \"\n \"do not make modifications to the existing common sets of cfg arguments. I.e. for inpaint pipeine, you \"\n \" need to adjust batch size of `mask` and `masked_image_latents` so should set the attribute as\"\n \"`callback_cfg_params = TEXT_TO_IMAGE_CFG_PARAMS.union({'mask', 'masked_image_latents'})`\"\n )\n\n def tearDown(self):\n # clean up the VRAM after each test in case of CUDA runtime errors\n super().tearDown()\n gc.collect()\n torch.cuda.empty_cache()\n\n def test_save_load_local(self, expected_max_difference=5e-4):\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n for component in pipe.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n inputs = self.get_dummy_inputs(torch_device)\n output = pipe(**inputs)[0]\n\n logger = logging.get_logger(\"diffusers.pipelines.pipeline_utils\")\n logger.setLevel(diffusers.logging.INFO)\n\n with tempfile.TemporaryDirectory() as tmpdir:\n pipe.save_pretrained(tmpdir, safe_serialization=False)\n\n with CaptureLogger(logger) as cap_logger:\n pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)\n\n for name in pipe_loaded.components.keys():\n if name not in pipe_loaded._optional_components:\n assert name in str(cap_logger)\n\n pipe_loaded.to(torch_device)\n pipe_loaded.set_progress_bar_config(disable=None)\n\n inputs = self.get_dummy_inputs(torch_device)\n output_loaded = pipe_loaded(**inputs)[0]\n\n max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()\n self.assertLess(max_diff, expected_max_difference)\n\n def test_pipeline_call_signature(self):\n self.assertTrue(\n hasattr(self.pipeline_class, \"__call__\"), f\"{self.pipeline_class} should have a `__call__` method\"\n )\n\n parameters = inspect.signature(self.pipeline_class.__call__).parameters\n\n optional_parameters = set()\n\n for k, v 
in parameters.items():\n if v.default != inspect._empty:\n optional_parameters.add(k)\n\n parameters = set(parameters.keys())\n parameters.remove(\"self\")\n parameters.discard(\"kwargs\") # kwargs can be added if arguments of pipeline call function are deprecated\n\n remaining_required_parameters = set()\n\n for param in self.params:\n if param not in parameters:\n remaining_required_parameters.add(param)\n\n self.assertTrue(\n len(remaining_required_parameters) == 0,\n f\"Required parameters not present: {remaining_required_parameters}\",\n )\n\n remaining_required_optional_parameters = set()\n\n for param in self.required_optional_params:\n if param not in optional_parameters:\n remaining_required_optional_parameters.add(param)\n\n self.assertTrue(\n len(remaining_required_optional_parameters) == 0,\n f\"Required optional parameters not present: {remaining_required_optional_parameters}\",\n )\n\n def test_inference_batch_consistent(self, batch_sizes=[2]):\n self._test_inference_batch_consistent(batch_sizes=batch_sizes)\n\n def _test_inference_batch_consistent(\n self, batch_sizes=[2], additional_params_copy_to_batched_inputs=[\"num_inference_steps\"]\n ):\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n inputs = self.get_dummy_inputs(torch_device)\n inputs[\"generator\"] = self.get_generator(0)\n\n logger = logging.get_logger(pipe.__module__)\n logger.setLevel(level=diffusers.logging.FATAL)\n\n # prepare batched inputs\n batched_inputs = []\n for batch_size in batch_sizes:\n batched_input = {}\n batched_input.update(inputs)\n\n for name in self.batch_params:\n if name not in inputs:\n continue\n\n value = inputs[name]\n if name == \"prompt\":\n len_prompt = len(value)\n # make unequal batch sizes\n batched_input[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)]\n\n # make last batch super long\n batched_input[name][-1] = 100 * \"very long\"\n\n else:\n batched_input[name] = batch_size * [value]\n\n if \"generator\" in inputs:\n batched_input[\"generator\"] = [self.get_generator(i) for i in range(batch_size)]\n\n if \"batch_size\" in inputs:\n batched_input[\"batch_size\"] = batch_size\n\n batched_inputs.append(batched_input)\n\n logger.setLevel(level=diffusers.logging.WARNING)\n for batch_size, batched_input in zip(batch_sizes, batched_inputs):\n output = pipe(**batched_input)\n assert len(output[0]) == batch_size\n\n def test_inference_batch_single_identical(self, batch_size=3, expected_max_diff=1e-4):\n self._test_inference_batch_single_identical(batch_size=batch_size, expected_max_diff=expected_max_diff)\n\n def _test_inference_batch_single_identical(\n self,\n batch_size=2,\n expected_max_diff=1e-4,\n additional_params_copy_to_batched_inputs=[\"num_inference_steps\"],\n ):\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n for components in pipe.components.values():\n if hasattr(components, \"set_default_attn_processor\"):\n components.set_default_attn_processor()\n\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n inputs = self.get_dummy_inputs(torch_device)\n # Reset generator in case it is has been used in self.get_dummy_inputs\n inputs[\"generator\"] = self.get_generator(0)\n\n logger = logging.get_logger(pipe.__module__)\n logger.setLevel(level=diffusers.logging.FATAL)\n\n # batchify inputs\n batched_inputs = {}\n batched_inputs.update(inputs)\n\n for name in self.batch_params:\n if name not in 
inputs:\n continue\n\n value = inputs[name]\n if name == \"prompt\":\n len_prompt = len(value)\n batched_inputs[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)]\n batched_inputs[name][-1] = 100 * \"very long\"\n\n else:\n batched_inputs[name] = batch_size * [value]\n\n if \"generator\" in inputs:\n batched_inputs[\"generator\"] = [self.get_generator(i) for i in range(batch_size)]\n\n if \"batch_size\" in inputs:\n batched_inputs[\"batch_size\"] = batch_size\n\n for arg in additional_params_copy_to_batched_inputs:\n batched_inputs[arg] = inputs[arg]\n\n output = pipe(**inputs)\n output_batch = pipe(**batched_inputs)\n\n assert output_batch[0].shape[0] == batch_size\n\n max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max()\n assert max_diff < expected_max_diff\n\n def test_dict_tuple_outputs_equivalent(self, expected_max_difference=1e-4):\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n for component in pipe.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n generator_device = \"cpu\"\n output = pipe(**self.get_dummy_inputs(generator_device))[0]\n output_tuple = pipe(**self.get_dummy_inputs(generator_device), return_dict=False)[0]\n\n max_diff = np.abs(to_np(output) - to_np(output_tuple)).max()\n self.assertLess(max_diff, expected_max_difference)\n\n def test_components_function(self):\n init_components = self.get_dummy_components()\n init_components = {k: v for k, v in init_components.items() if not isinstance(v, (str, int, float))}\n\n pipe = self.pipeline_class(**init_components)\n\n self.assertTrue(hasattr(pipe, \"components\"))\n self.assertTrue(set(pipe.components.keys()) == set(init_components.keys()))\n\n @unittest.skipIf(torch_device != \"cuda\", reason=\"float16 requires CUDA\")\n def test_float16_inference(self, expected_max_diff=5e-2):\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n for component in pipe.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n components = self.get_dummy_components()\n pipe_fp16 = self.pipeline_class(**components)\n for component in pipe_fp16.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n\n pipe_fp16.to(torch_device, torch.float16)\n pipe_fp16.set_progress_bar_config(disable=None)\n\n inputs = self.get_dummy_inputs(torch_device)\n # Reset generator in case it is used inside dummy inputs\n if \"generator\" in inputs:\n inputs[\"generator\"] = self.get_generator(0)\n\n output = pipe(**inputs)[0]\n\n fp16_inputs = self.get_dummy_inputs(torch_device)\n # Reset generator in case it is used inside dummy inputs\n if \"generator\" in fp16_inputs:\n fp16_inputs[\"generator\"] = self.get_generator(0)\n\n output_fp16 = pipe_fp16(**fp16_inputs)[0]\n\n max_diff = np.abs(to_np(output) - to_np(output_fp16)).max()\n self.assertLess(max_diff, expected_max_diff, \"The outputs of the fp16 and fp32 pipelines are too different.\")\n\n @unittest.skipIf(torch_device != \"cuda\", reason=\"float16 requires CUDA\")\n def test_save_load_float16(self, expected_max_diff=1e-2):\n components = self.get_dummy_components()\n for name, module in components.items():\n if hasattr(module, \"half\"):\n components[name] = 
module.to(torch_device).half()\n\n pipe = self.pipeline_class(**components)\n for component in pipe.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n inputs = self.get_dummy_inputs(torch_device)\n output = pipe(**inputs)[0]\n\n with tempfile.TemporaryDirectory() as tmpdir:\n pipe.save_pretrained(tmpdir)\n pipe_loaded = self.pipeline_class.from_pretrained(tmpdir, torch_dtype=torch.float16)\n for component in pipe_loaded.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n pipe_loaded.to(torch_device)\n pipe_loaded.set_progress_bar_config(disable=None)\n\n for name, component in pipe_loaded.components.items():\n if hasattr(component, \"dtype\"):\n self.assertTrue(\n component.dtype == torch.float16,\n f\"`{name}.dtype` switched from `float16` to {component.dtype} after loading.\",\n )\n\n inputs = self.get_dummy_inputs(torch_device)\n output_loaded = pipe_loaded(**inputs)[0]\n max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()\n self.assertLess(\n max_diff, expected_max_diff, \"The output of the fp16 pipeline changed after saving and loading.\"\n )\n\n def test_save_load_optional_components(self, expected_max_difference=1e-4):\n if not hasattr(self.pipeline_class, \"_optional_components\"):\n return\n\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n for component in pipe.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n # set all optional components to None\n for optional_component in pipe._optional_components:\n setattr(pipe, optional_component, None)\n\n generator_device = \"cpu\"\n inputs = self.get_dummy_inputs(generator_device)\n output = pipe(**inputs)[0]\n\n with tempfile.TemporaryDirectory() as tmpdir:\n pipe.save_pretrained(tmpdir, safe_serialization=False)\n pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)\n for component in pipe_loaded.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n pipe_loaded.to(torch_device)\n pipe_loaded.set_progress_bar_config(disable=None)\n\n for optional_component in pipe._optional_components:\n self.assertTrue(\n getattr(pipe_loaded, optional_component) is None,\n f\"`{optional_component}` did not stay set to None after loading.\",\n )\n\n inputs = self.get_dummy_inputs(generator_device)\n output_loaded = pipe_loaded(**inputs)[0]\n\n max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()\n self.assertLess(max_diff, expected_max_difference)\n\n @unittest.skipIf(torch_device != \"cuda\", reason=\"CUDA and CPU are required to switch devices\")\n def test_to_device(self):\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n pipe.set_progress_bar_config(disable=None)\n\n pipe.to(\"cpu\")\n model_devices = [component.device.type for component in components.values() if hasattr(component, \"device\")]\n self.assertTrue(all(device == \"cpu\" for device in model_devices))\n\n output_cpu = pipe(**self.get_dummy_inputs(\"cpu\"))[0]\n self.assertTrue(np.isnan(output_cpu).sum() == 0)\n\n pipe.to(\"cuda\")\n model_devices = [component.device.type for component in components.values() if hasattr(component, \"device\")]\n self.assertTrue(all(device == 
\"cuda\" for device in model_devices))\n\n output_cuda = pipe(**self.get_dummy_inputs(\"cuda\"))[0]\n self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0)\n\n def test_to_dtype(self):\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n pipe.set_progress_bar_config(disable=None)\n\n model_dtypes = [component.dtype for component in components.values() if hasattr(component, \"dtype\")]\n self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes))\n\n pipe.to(torch_dtype=torch.float16)\n model_dtypes = [component.dtype for component in components.values() if hasattr(component, \"dtype\")]\n self.assertTrue(all(dtype == torch.float16 for dtype in model_dtypes))\n\n def test_attention_slicing_forward_pass(self, expected_max_diff=1e-3):\n self._test_attention_slicing_forward_pass(expected_max_diff=expected_max_diff)\n\n def _test_attention_slicing_forward_pass(\n self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-3\n ):\n if not self.test_attention_slicing:\n return\n\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n for component in pipe.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n generator_device = \"cpu\"\n inputs = self.get_dummy_inputs(generator_device)\n output_without_slicing = pipe(**inputs)[0]\n\n pipe.enable_attention_slicing(slice_size=1)\n inputs = self.get_dummy_inputs(generator_device)\n output_with_slicing = pipe(**inputs)[0]\n\n if test_max_difference:\n max_diff = np.abs(to_np(output_with_slicing) - to_np(output_without_slicing)).max()\n self.assertLess(max_diff, expected_max_diff, \"Attention slicing should not affect the inference results\")\n\n if test_mean_pixel_difference:\n assert_mean_pixel_difference(to_np(output_with_slicing[0]), to_np(output_without_slicing[0]))\n\n @unittest.skipIf(\n torch_device != \"cuda\" or not is_accelerate_available() or is_accelerate_version(\"<\", \"0.14.0\"),\n reason=\"CPU offload is only available with CUDA and `accelerate v0.14.0` or higher\",\n )\n def test_sequential_cpu_offload_forward_pass(self, expected_max_diff=1e-4):\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n for component in pipe.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n generator_device = \"cpu\"\n inputs = self.get_dummy_inputs(generator_device)\n output_without_offload = pipe(**inputs)[0]\n\n pipe.enable_sequential_cpu_offload()\n\n inputs = self.get_dummy_inputs(generator_device)\n output_with_offload = pipe(**inputs)[0]\n\n max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max()\n self.assertLess(max_diff, expected_max_diff, \"CPU offloading should not affect the inference results\")\n\n @unittest.skipIf(\n torch_device != \"cuda\" or not is_accelerate_available() or is_accelerate_version(\"<\", \"0.17.0\"),\n reason=\"CPU offload is only available with CUDA and `accelerate v0.17.0` or higher\",\n )\n def test_model_cpu_offload_forward_pass(self, expected_max_diff=2e-4):\n generator_device = \"cpu\"\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n\n for component in pipe.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n 
component.set_default_attn_processor()\n\n pipe = pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n inputs = self.get_dummy_inputs(generator_device)\n output_without_offload = pipe(**inputs)[0]\n\n pipe.enable_model_cpu_offload()\n inputs = self.get_dummy_inputs(generator_device)\n output_with_offload = pipe(**inputs)[0]\n\n max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max()\n self.assertLess(max_diff, expected_max_diff, \"CPU offloading should not affect the inference results\")\n offloaded_modules = [\n v\n for k, v in pipe.components.items()\n if isinstance(v, torch.nn.Module) and k not in pipe._exclude_from_cpu_offload\n ]\n (\n self.assertTrue(all(v.device.type == \"cpu\" for v in offloaded_modules)),\n f\"Not offloaded: {[v for v in offloaded_modules if v.device.type != 'cpu']}\",\n )\n\n @unittest.skipIf(\n torch_device != \"cuda\" or not is_xformers_available(),\n reason=\"XFormers attention is only available with CUDA and `xformers` installed\",\n )\n def test_xformers_attention_forwardGenerator_pass(self):\n self._test_xformers_attention_forwardGenerator_pass()\n\n def _test_xformers_attention_forwardGenerator_pass(\n self, test_max_difference=True, test_mean_pixel_difference=True, expected_max_diff=1e-4\n ):\n if not self.test_xformers_attention:\n return\n\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n for component in pipe.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n inputs = self.get_dummy_inputs(torch_device)\n output_without_offload = pipe(**inputs)[0]\n output_without_offload = (\n output_without_offload.cpu() if torch.is_tensor(output_without_offload) else output_without_offload\n )\n\n pipe.enable_xformers_memory_efficient_attention()\n inputs = self.get_dummy_inputs(torch_device)\n output_with_offload = pipe(**inputs)[0]\n output_with_offload = (\n output_with_offload.cpu() if torch.is_tensor(output_with_offload) else output_without_offload\n )\n\n if test_max_difference:\n max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max()\n self.assertLess(max_diff, expected_max_diff, \"XFormers attention should not affect the inference results\")\n\n if test_mean_pixel_difference:\n assert_mean_pixel_difference(output_with_offload[0], output_without_offload[0])\n\n def test_progress_bar(self):\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n pipe.to(torch_device)\n\n inputs = self.get_dummy_inputs(torch_device)\n with io.StringIO() as stderr, contextlib.redirect_stderr(stderr):\n _ = pipe(**inputs)\n stderr = stderr.getvalue()\n # we can't calculate the number of progress steps beforehand e.g. for strength-dependent img2img,\n # so we just match \"5\" in \"#####| 1/5 [00:01<00:00]\"\n max_steps = re.search(\"/(.*?) 
\", stderr).group(1)\n self.assertTrue(max_steps is not None and len(max_steps) > 0)\n self.assertTrue(\n f\"{max_steps}/{max_steps}\" in stderr, \"Progress bar should be enabled and stopped at the max step\"\n )\n\n pipe.set_progress_bar_config(disable=True)\n with io.StringIO() as stderr, contextlib.redirect_stderr(stderr):\n _ = pipe(**inputs)\n self.assertTrue(stderr.getvalue() == \"\", \"Progress bar should be disabled\")\n\n def test_num_images_per_prompt(self):\n sig = inspect.signature(self.pipeline_class.__call__)\n\n if \"num_images_per_prompt\" not in sig.parameters:\n return\n\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n pipe = pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n batch_sizes = [1, 2]\n num_images_per_prompts = [1, 2]\n\n for batch_size in batch_sizes:\n for num_images_per_prompt in num_images_per_prompts:\n inputs = self.get_dummy_inputs(torch_device)\n\n for key in inputs.keys():\n if key in self.batch_params:\n inputs[key] = batch_size * [inputs[key]]\n\n images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]\n\n assert images.shape[0] == batch_size * num_images_per_prompt\n\n def test_cfg(self):\n sig = inspect.signature(self.pipeline_class.__call__)\n\n if \"guidance_scale\" not in sig.parameters:\n return\n\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n pipe = pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n inputs = self.get_dummy_inputs(torch_device)\n\n inputs[\"guidance_scale\"] = 1.0\n out_no_cfg = pipe(**inputs)[0]\n\n inputs[\"guidance_scale\"] = 7.5\n out_cfg = pipe(**inputs)[0]\n\n assert out_cfg.shape == out_no_cfg.shape\n\n def test_callback_inputs(self):\n sig = inspect.signature(self.pipeline_class.__call__)\n has_callback_tensor_inputs = \"callback_on_step_end_tensor_inputs\" in sig.parameters\n has_callback_step_end = \"callback_on_step_end\" in sig.parameters\n\n if not (has_callback_tensor_inputs and has_callback_step_end):\n return\n\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n pipe = pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n self.assertTrue(\n hasattr(pipe, \"_callback_tensor_inputs\"),\n f\" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs\",\n )\n\n def callback_inputs_subset(pipe, i, t, callback_kwargs):\n # interate over callback args\n for tensor_name, tensor_value in callback_kwargs.items():\n # check that we're only passing in allowed tensor inputs\n assert tensor_name in pipe._callback_tensor_inputs\n\n return callback_kwargs\n\n def callback_inputs_all(pipe, i, t, callback_kwargs):\n for tensor_name in pipe._callback_tensor_inputs:\n assert tensor_name in callback_kwargs\n\n # interate over callback args\n for tensor_name, tensor_value in callback_kwargs.items():\n # check that we're only passing in allowed tensor inputs\n assert tensor_name in pipe._callback_tensor_inputs\n\n return callback_kwargs\n\n inputs = self.get_dummy_inputs(torch_device)\n\n # Test passing in a subset\n inputs[\"callback_on_step_end\"] = callback_inputs_subset\n inputs[\"callback_on_step_end_tensor_inputs\"] = [\"latents\"]\n inputs[\"output_type\"] = \"latent\"\n output = pipe(**inputs)[0]\n\n # Test passing in a everything\n inputs[\"callback_on_step_end\"] = callback_inputs_all\n inputs[\"callback_on_step_end_tensor_inputs\"] = 
pipe._callback_tensor_inputs\n inputs[\"output_type\"] = \"latent\"\n output = pipe(**inputs)[0]\n\n def callback_inputs_change_tensor(pipe, i, t, callback_kwargs):\n is_last = i == (pipe.num_timesteps - 1)\n if is_last:\n callback_kwargs[\"latents\"] = torch.zeros_like(callback_kwargs[\"latents\"])\n return callback_kwargs\n\n inputs[\"callback_on_step_end\"] = callback_inputs_change_tensor\n inputs[\"callback_on_step_end_tensor_inputs\"] = pipe._callback_tensor_inputs\n inputs[\"output_type\"] = \"latent\"\n output = pipe(**inputs)[0]\n assert output.abs().sum() == 0\n\n def test_callback_cfg(self):\n sig = inspect.signature(self.pipeline_class.__call__)\n has_callback_tensor_inputs = \"callback_on_step_end_tensor_inputs\" in sig.parameters\n has_callback_step_end = \"callback_on_step_end\" in sig.parameters\n\n if not (has_callback_tensor_inputs and has_callback_step_end):\n return\n\n if \"guidance_scale\" not in sig.parameters:\n return\n\n components = self.get_dummy_components()\n pipe = self.pipeline_class(**components)\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n self.assertTrue(\n hasattr(pipe, \"_callback_tensor_inputs\"),\n f\" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs\",\n )\n\n def callback_increase_guidance(pipe, i, t, callback_kwargs):\n pipe._guidance_scale += 1.0\n\n return callback_kwargs\n\n inputs = self.get_dummy_inputs(torch_device)\n\n # use cfg guidance because some pipelines modify the shape of the latents\n # outside of the denoising loop\n inputs[\"guidance_scale\"] = 2.0\n inputs[\"callback_on_step_end\"] = callback_increase_guidance\n inputs[\"callback_on_step_end_tensor_inputs\"] = pipe._callback_tensor_inputs\n _ = pipe(**inputs)[0]\n\n # we increase the guidance scale by 1.0 at every step\n # check that the guidance scale is increased by the number of scheduler timesteps\n # accounts for models that modify the number of inference steps based on strength\n assert pipe.guidance_scale == (inputs[\"guidance_scale\"] + pipe.num_timesteps)" }, { "identifier": "SDXLOptionalComponentsTesterMixin", "path": "tests/pipelines/test_pipelines_common.py", "snippet": "class SDXLOptionalComponentsTesterMixin:\n def encode_prompt(\n self, tokenizers, text_encoders, prompt: str, num_images_per_prompt: int = 1, negative_prompt: str = None\n ):\n device = text_encoders[0].device\n\n if isinstance(prompt, str):\n prompt = [prompt]\n batch_size = len(prompt)\n\n prompt_embeds_list = []\n for tokenizer, text_encoder in zip(tokenizers, text_encoders):\n text_inputs = tokenizer(\n prompt,\n padding=\"max_length\",\n max_length=tokenizer.model_max_length,\n truncation=True,\n return_tensors=\"pt\",\n )\n\n text_input_ids = text_inputs.input_ids\n\n prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True)\n pooled_prompt_embeds = prompt_embeds[0]\n prompt_embeds = prompt_embeds.hidden_states[-2]\n prompt_embeds_list.append(prompt_embeds)\n\n prompt_embeds = torch.concat(prompt_embeds_list, dim=-1)\n\n if negative_prompt is None:\n negative_prompt_embeds = torch.zeros_like(prompt_embeds)\n negative_pooled_prompt_embeds = torch.zeros_like(pooled_prompt_embeds)\n else:\n negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt\n\n negative_prompt_embeds_list = []\n for tokenizer, text_encoder in zip(tokenizers, text_encoders):\n uncond_input = tokenizer(\n negative_prompt,\n 
padding=\"max_length\",\n max_length=tokenizer.model_max_length,\n truncation=True,\n return_tensors=\"pt\",\n )\n\n negative_prompt_embeds = text_encoder(uncond_input.input_ids.to(device), output_hidden_states=True)\n negative_pooled_prompt_embeds = negative_prompt_embeds[0]\n negative_prompt_embeds = negative_prompt_embeds.hidden_states[-2]\n negative_prompt_embeds_list.append(negative_prompt_embeds)\n\n negative_prompt_embeds = torch.concat(negative_prompt_embeds_list, dim=-1)\n\n bs_embed, seq_len, _ = prompt_embeds.shape\n\n # duplicate text embeddings for each generation per prompt, using mps friendly method\n prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)\n prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)\n\n # for classifier-free guidance\n # duplicate unconditional embeddings for each generation per prompt, using mps friendly method\n seq_len = negative_prompt_embeds.shape[1]\n\n negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)\n negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)\n\n pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(\n bs_embed * num_images_per_prompt, -1\n )\n\n # for classifier-free guidance\n negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(\n bs_embed * num_images_per_prompt, -1\n )\n\n return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds\n\n def _test_save_load_optional_components(self, expected_max_difference=1e-4):\n components = self.get_dummy_components()\n\n pipe = self.pipeline_class(**components)\n for optional_component in pipe._optional_components:\n setattr(pipe, optional_component, None)\n\n for component in pipe.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n pipe.to(torch_device)\n pipe.set_progress_bar_config(disable=None)\n\n generator_device = \"cpu\"\n inputs = self.get_dummy_inputs(generator_device)\n\n tokenizer = components.pop(\"tokenizer\")\n tokenizer_2 = components.pop(\"tokenizer_2\")\n text_encoder = components.pop(\"text_encoder\")\n text_encoder_2 = components.pop(\"text_encoder_2\")\n\n tokenizers = [tokenizer, tokenizer_2] if tokenizer is not None else [tokenizer_2]\n text_encoders = [text_encoder, text_encoder_2] if text_encoder is not None else [text_encoder_2]\n prompt = inputs.pop(\"prompt\")\n (\n prompt_embeds,\n negative_prompt_embeds,\n pooled_prompt_embeds,\n negative_pooled_prompt_embeds,\n ) = self.encode_prompt(tokenizers, text_encoders, prompt)\n inputs[\"prompt_embeds\"] = prompt_embeds\n inputs[\"negative_prompt_embeds\"] = negative_prompt_embeds\n inputs[\"pooled_prompt_embeds\"] = pooled_prompt_embeds\n inputs[\"negative_pooled_prompt_embeds\"] = negative_pooled_prompt_embeds\n\n output = pipe(**inputs)[0]\n\n with tempfile.TemporaryDirectory() as tmpdir:\n pipe.save_pretrained(tmpdir)\n pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)\n for component in pipe_loaded.components.values():\n if hasattr(component, \"set_default_attn_processor\"):\n component.set_default_attn_processor()\n pipe_loaded.to(torch_device)\n pipe_loaded.set_progress_bar_config(disable=None)\n\n for optional_component in pipe._optional_components:\n self.assertTrue(\n getattr(pipe_loaded, optional_component) is None,\n f\"`{optional_component}` did not stay set to None after loading.\",\n )\n\n 
inputs = self.get_dummy_inputs(generator_device)\n _ = inputs.pop(\"prompt\")\n inputs[\"prompt_embeds\"] = prompt_embeds\n inputs[\"negative_prompt_embeds\"] = negative_prompt_embeds\n inputs[\"pooled_prompt_embeds\"] = pooled_prompt_embeds\n inputs[\"negative_pooled_prompt_embeds\"] = negative_pooled_prompt_embeds\n\n output_loaded = pipe_loaded(**inputs)[0]\n\n max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()\n self.assertLess(max_diff, expected_max_difference)" } ]
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, EulerDiscreteScheduler, LCMScheduler, StableDiffusionXLControlNetPipeline, UNet2DConditionModel, ) from diffusers.pipelines.controlnet.pipeline_controlnet import MultiControlNetModel from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, slow, torch_device from diffusers.utils.torch_utils import randn_tensor from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, SDXLOptionalComponentsTesterMixin, )
11,424
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. enable_full_determinism() class StableDiffusionXLControlNetPipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, SDXLOptionalComponentsTesterMixin, unittest.TestCase, ): pipeline_class = StableDiffusionXLControlNetPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. enable_full_determinism() class StableDiffusionXLControlNetPipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, SDXLOptionalComponentsTesterMixin, unittest.TestCase, ): pipeline_class = StableDiffusionXLControlNetPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
2
2023-11-18 01:40:55+00:00
16k
BAAI-DCAI/SegVol
inference_demo.py
[ { "identifier": "sam_model_registry", "path": "segment_anything_volumetric/build_sam.py", "snippet": "def build_sam_vit_3d(args, checkpoint=None):\ndef _build_sam(\n image_encoder_type,\n embed_dim,\n patch_size,\n checkpoint,\n image_size,\n):" }, { "identifier": "SegVol", "path": "network/model.py", "snippet": "class SegVol(nn.Module):\n def __init__(self, \n image_encoder, \n mask_decoder,\n prompt_encoder,\n clip_ckpt,\n roi_size,\n patch_size,\n test_mode=False,\n ):\n super().__init__()\n self.image_encoder = image_encoder\n self.mask_decoder = mask_decoder\n self.prompt_encoder = prompt_encoder\n self.text_encoder = TextEncoder(clip_ckpt)\n self.feat_shape = np.array(roi_size)/np.array(patch_size)\n self.test_mode = test_mode\n self.dice_loss = BinaryDiceLoss().cuda()\n self.bce_loss = BCELoss().cuda()\n self.decoder_iter = 6\n\n def forward(self, image, text=None, boxes=None, points=None, **kwargs):\n bs = image.shape[0]\n img_shape = (image.shape[2], image.shape[3], image.shape[4])\n image_embedding, _ = self.image_encoder(image)\n image_embedding = image_embedding.transpose(1, 2).view(bs, -1, \n int(self.feat_shape[0]), int(self.feat_shape[1]), int(self.feat_shape[2]))\n # test mode\n if self.test_mode:\n return self.forward_decoder(image_embedding, img_shape, text, boxes, points)\n \n # train mode\n ## sl\n sl_loss = self.supervised_forward(image, image_embedding, img_shape, kwargs['train_organs'], kwargs['train_labels'])\n ## ssl\n ssl_loss = self.unsupervised_forward(image, image_embedding, kwargs['pseudo_seg_cleaned'], img_shape)\n return sl_loss, ssl_loss\n\n def forward_decoder(self, image_embedding, img_shape, text=None, boxes=None, points=None):\n with torch.no_grad():\n if boxes is not None:\n if len(boxes.shape) == 2:\n boxes = boxes[:, None, :] # (B, 1, 6)\n if text is not None:\n text_embedding = self.text_encoder(text) # (B, 768)\n else:\n text_embedding = None\n sparse_embeddings, dense_embeddings = self.prompt_encoder(\n points=points,\n boxes=boxes,\n masks=None,\n text_embedding=text_embedding,\n )\n\n dense_pe = self.prompt_encoder.get_dense_pe()\n low_res_masks, _ = self.mask_decoder(\n image_embeddings=image_embedding,\n text_embedding = text_embedding,\n image_pe=dense_pe,\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=False,\n )\n logits = F.interpolate(low_res_masks, size=img_shape, mode='trilinear', align_corners=False)\n return logits\n\n def supervised_forward(self, image, image_embedding, img_shape, training_organs, train_labels):\n iter_points, iter_bboxes, iter_organs = self.build_prompt_label(image.shape[0], training_organs, train_labels)\n # select prompt\n prompt_options = [[None, iter_points, iter_organs], [iter_bboxes, None, iter_organs], \n [None, None, iter_organs], [iter_bboxes, None, None], [None, iter_points, None],\n [iter_bboxes, iter_points, None]]\n sl_loss = 0\n for prompt in prompt_options:\n bboxes, points, organs = prompt\n logits = self.forward_decoder(image_embedding, img_shape, text=organs, boxes=bboxes, points=points)\n # cal loss\n sl_loss_dice = self.dice_loss.forward(logits.squeeze().float(), train_labels.squeeze().float())\n sl_loss_bce = self.bce_loss.forward(logits.squeeze().float(), train_labels.squeeze().float())\n sl_loss += sl_loss_dice + sl_loss_bce\n return sl_loss\n \n def unsupervised_forward(self, image, image_embedding, pseudo_seg_cleaned, img_shape):\n sll_loss = 0\n for iter in range(self.decoder_iter):\n if iter % 2 == 0:\n pseudo_labels, 
pseudo_points_prompt = self.build_pseudo_point_prompt_label(image.shape, pseudo_seg_cleaned)\n logits = self.forward_decoder(image_embedding, img_shape, text=None, boxes=None, points=pseudo_points_prompt)\n else:\n pseudo_labels, pseudo_bboxes_prompt = self.build_pseudo_box_prompt_label(image.shape, pseudo_seg_cleaned)\n logits = self.forward_decoder(image_embedding, img_shape, text=None, boxes=pseudo_bboxes_prompt, points=None)\n # cal loss\n sll_loss_dice = self.dice_loss.forward(logits.squeeze().float(), pseudo_labels.squeeze().float())\n sll_loss_bce = self.bce_loss.forward(logits.squeeze().float(), pseudo_labels.squeeze().float())\n sll_loss += sll_loss_dice + sll_loss_bce\n return sll_loss\n\n def build_prompt_label(self, bs, training_organs, train_labels):\n # generate prompt & label\n iter_organs = []\n iter_bboxes = []\n iter_points_ax = []\n iter_point_labels = []\n for sample_idx in range(bs):\n # organ prompt\n iter_organs.append(training_organs)\n # box prompt\n box = generate_box(train_labels[sample_idx])\n iter_bboxes.append(box)\n # point prompt\n num_positive_extra_max, num_negative_extra_max = 10, 10\n num_positive_extra = random.randint(0, num_positive_extra_max)\n num_negative_extra = random.randint(0, num_negative_extra_max)\n point, point_label = select_points(\n train_labels[sample_idx],\n num_positive_extra=num_positive_extra,\n num_negative_extra=num_negative_extra,\n fix_extra_point_num=num_positive_extra_max + num_negative_extra_max)\n iter_points_ax.append(point)\n iter_point_labels.append(point_label)\n # batched prompt\n iter_points_ax = torch.stack(iter_points_ax, dim=0).cuda()\n iter_point_labels = torch.stack(iter_point_labels, dim=0).cuda()\n iter_points = (iter_points_ax, iter_point_labels)\n iter_bboxes = torch.stack(iter_bboxes, dim=0).float().cuda()\n return iter_points, iter_bboxes, iter_organs\n \n def build_pseudo_point_prompt_label(self, input_shape, seg_labels):\n pseudo_labels = torch.zeros(input_shape).cuda()\n # generate points\n points = []\n point_labels = []\n for batch_idx in range(input_shape[0]):\n # generate pseudo label\n unique_ids = torch.unique(seg_labels[batch_idx])\n unique_ids = unique_ids[unique_ids != -1]\n region_id = random.choice(unique_ids).item()\n pseudo_labels[batch_idx][seg_labels[batch_idx]==region_id] = 1\n # generate point prompt\n num_positive_extra_max, num_negative_extra_max = 10, 10\n num_positive_extra = random.randint(4, num_positive_extra_max)\n num_negative_extra = random.randint(0, num_negative_extra_max)\n assert len(pseudo_labels[batch_idx][0].shape) == 3\n point, point_label = select_points(\n pseudo_labels[batch_idx][0],\n num_positive_extra=num_positive_extra,\n num_negative_extra=num_negative_extra,\n fix_extra_point_num=num_positive_extra_max + num_negative_extra_max)\n points.append(point)\n point_labels.append(point_label)\n points = torch.stack(points, dim=0).cuda()\n point_labels = torch.stack(point_labels, dim=0).cuda()\n pseudo_points_prompt = (points, point_labels)\n return pseudo_labels, pseudo_points_prompt\n\n def build_pseudo_box_prompt_label(self, input_shape, seg_labels_cleaned):\n pseudo_labels = torch.zeros(input_shape).cuda()\n iter_bboxes = []\n # generate boxes\n for batch_idx in range(input_shape[0]):\n # generate ori pseudo label\n unique_ids = torch.unique(seg_labels_cleaned[batch_idx])\n unique_ids = unique_ids[unique_ids != -1]\n region_id = random.choice(unique_ids).item()\n pseudo_labels[batch_idx][seg_labels_cleaned[batch_idx]==region_id] = 1\n # generate box prompt\n box = 
generate_box(pseudo_labels[batch_idx][0])\n iter_bboxes.append(box)\n # refine pseudo label\n x_min, y_min, z_min, x_max, y_max, z_max = box\n binary_cube = torch.zeros_like(pseudo_labels[batch_idx][0]).int()\n binary_cube[x_min:x_max+1, y_min:y_max+1, z_min:z_max+1] = 1\n # cal iou\n mask_label = seg_labels_cleaned[batch_idx][0]\n assert binary_cube.shape == mask_label.shape, str(binary_cube.shape) + ' ' + str(mask_label.shape)\n mask_values_in_binary_cube = mask_label[binary_cube == 1]\n unique_mask_values = torch.unique(mask_values_in_binary_cube)\n # print('unique_mask_values ', unique_mask_values)\n for value in unique_mask_values:\n if value == -1: continue\n mask_area = (mask_label == value)\n intersection = (binary_cube & mask_area)\n iou = intersection.float().sum() / mask_area.float().sum()\n if iou > 0.90:\n # print(f\"Mask value {value} has IOU > 0.90 in binary cube.\")\n pseudo_labels[batch_idx][seg_labels_cleaned[batch_idx]==value] = 1\n\n bboxes = torch.stack(iter_bboxes, dim=0).float().cuda()\n return pseudo_labels, bboxes" }, { "identifier": "process_ct_gt", "path": "data_process/demo_data_process.py", "snippet": "def process_ct_gt(case_path, label_path, category, spatial_size):\n print('Data preprocessing...')\n # transform\n img_loader = transforms.LoadImage()\n transform = transforms.Compose(\n [\n transforms.Orientationd(keys=[\"image\", \"label\"], axcodes=\"RAS\"),\n ForegroundNormalization(keys=[\"image\"]),\n DimTranspose(keys=[\"image\", \"label\"]),\n MinMaxNormalization(),\n transforms.SpatialPadd(keys=[\"image\", \"label\"], spatial_size=spatial_size, mode='constant'),\n transforms.CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\"),\n transforms.ToTensord(keys=[\"image\", \"label\"]),\n ]\n )\n zoom_out_transform = transforms.Resized(keys=[\"image\", \"label\"], spatial_size=spatial_size, mode='nearest-exact')\n\n ###\n item = {}\n # generate ct_voxel_ndarray\n ct_voxel_ndarray, _ = img_loader(case_path)\n print(type(ct_voxel_ndarray))\n ct_voxel_ndarray = np.array(ct_voxel_ndarray).squeeze()\n ct_shape = ct_voxel_ndarray.shape\n ct_voxel_ndarray = np.expand_dims(ct_voxel_ndarray, axis=0)\n item['image'] = ct_voxel_ndarray\n\n # generate gt_voxel_ndarray\n gt_voxel_ndarray, _ = img_loader(label_path)\n gt_voxel_ndarray = np.array(gt_voxel_ndarray)\n present_categories = np.unique(gt_voxel_ndarray)\n gt_masks = []\n for cls_idx in range(len(category)):\n # ignore background\n cls = cls_idx + 1\n if cls not in present_categories:\n gt_voxel_ndarray_category = np.zeros(ct_shape)\n gt_masks.append(gt_voxel_ndarray_category)\n else:\n gt_voxel_ndarray_category = gt_voxel_ndarray.copy()\n gt_voxel_ndarray_category[gt_voxel_ndarray != cls] = 0\n gt_voxel_ndarray_category[gt_voxel_ndarray == cls] = 1\n gt_masks.append(gt_voxel_ndarray_category)\n gt_voxel_ndarray = np.stack(gt_masks, axis=0)\n assert gt_voxel_ndarray.shape[0] == len(category) and gt_voxel_ndarray.shape[1:] == ct_voxel_ndarray.shape[1:]\n item['label'] = gt_voxel_ndarray.astype(np.int32)\n\n # transform\n item = transform(item)\n item_zoom_out = zoom_out_transform(item)\n item['zoom_out_image'] = item_zoom_out['image']\n item['zoom_out_label'] = item_zoom_out['label']\n print( 'Zoom_in image shape: ', item['image'].shape, \n '\\nZoom_in label shape: ', item['label'].shape,\n '\\nZoom_out image shape: ', item['zoom_out_image'].shape,\n '\\nZoom_out label shape: ', item['zoom_out_label'].shape,\n )\n return item" }, { "identifier": "sliding_window_inference", "path": 
"utils/monai_inferers_utils.py", "snippet": "def sliding_window_inference(\n inputs: torch.Tensor,\n prompt_reflection: Union[torch.Tensor, Tuple[torch.Tensor, ...]],\n roi_size: Union[Sequence[int], int],\n sw_batch_size: int,\n predictor: Callable[..., Union[torch.Tensor, Sequence[torch.Tensor], Dict[Any, torch.Tensor]]],\n overlap: float = 0.25,\n mode: Union[BlendMode, str] = BlendMode.CONSTANT,\n sigma_scale: Union[Sequence[float], float] = 0.125,\n padding_mode: Union[PytorchPadMode, str] = PytorchPadMode.CONSTANT,\n cval: float = 0.0,\n sw_device: Union[torch.device, str, None] = None,\n device: Union[torch.device, str, None] = None,\n progress: bool = False,\n roi_weight_map: Union[torch.Tensor, None] = None,\n *args: Any,\n **kwargs: Any,\n) -> Union[torch.Tensor, Tuple[torch.Tensor, ...], Dict[Any, torch.Tensor]]:\n \"\"\"\n Sliding window inference on `inputs` with `predictor`.\n\n The outputs of `predictor` could be a tensor, a tuple, or a dictionary of tensors.\n Each output in the tuple or dict value is allowed to have different resolutions with respect to the input.\n e.g., the input patch spatial size is [128,128,128], the output (a tuple of two patches) patch sizes\n could be ([128,64,256], [64,32,128]).\n In this case, the parameter `overlap` and `roi_size` need to be carefully chosen to ensure the output ROI is still\n an integer. If the predictor's input and output spatial sizes are not equal, we recommend choosing the parameters\n so that `overlap*roi_size*output_size/input_size` is an integer (for each spatial dimension).\n\n When roi_size is larger than the inputs' spatial size, the input image are padded during inference.\n To maintain the same spatial sizes, the output image will be cropped to the original input size.\n\n Args:\n inputs: input image to be processed (assuming NCHW[D])\n roi_size: the spatial window size for inferences.\n When its components have None or non-positives, the corresponding inputs dimension will be used.\n if the components of the `roi_size` are non-positive values, the transform will use the\n corresponding components of img size. For example, `roi_size=(32, -1)` will be adapted\n to `(32, 64)` if the second spatial dimension size of img is `64`.\n sw_batch_size: the batch size to run window slices.\n predictor: given input tensor ``patch_data`` in shape NCHW[D],\n The outputs of the function call ``predictor(patch_data)`` should be a tensor, a tuple, or a dictionary\n with Tensor values. Each output in the tuple or dict value should have the same batch_size, i.e. NM'H'W'[D'];\n where H'W'[D'] represents the output patch's spatial size, M is the number of output channels,\n N is `sw_batch_size`, e.g., the input shape is (7, 1, 128,128,128),\n the output could be a tuple of two tensors, with shapes: ((7, 5, 128, 64, 256), (7, 4, 64, 32, 128)).\n In this case, the parameter `overlap` and `roi_size` need to be carefully chosen\n to ensure the scaled output ROI sizes are still integers.\n If the `predictor`'s input and output spatial sizes are different,\n we recommend choosing the parameters so that ``overlap*roi_size*zoom_scale`` is an integer for each dimension.\n overlap: Amount of overlap between scans.\n mode: {``\"constant\"``, ``\"gaussian\"``}\n How to blend output of overlapping windows. 
Defaults to ``\"constant\"``.\n\n - ``\"constant``\": gives equal weight to all predictions.\n - ``\"gaussian``\": gives less weight to predictions on edges of windows.\n\n sigma_scale: the standard deviation coefficient of the Gaussian window when `mode` is ``\"gaussian\"``.\n Default: 0.125. Actual window sigma is ``sigma_scale`` * ``dim_size``.\n When sigma_scale is a sequence of floats, the values denote sigma_scale at the corresponding\n spatial dimensions.\n padding_mode: {``\"constant\"``, ``\"reflect\"``, ``\"replicate\"``, ``\"circular\"``}\n Padding mode for ``inputs``, when ``roi_size`` is larger than inputs. Defaults to ``\"constant\"``\n See also: https://pytorch.org/docs/stable/generated/torch.nn.functional.pad.html\n cval: fill value for 'constant' padding mode. Default: 0\n sw_device: device for the window data.\n By default the device (and accordingly the memory) of the `inputs` is used.\n Normally `sw_device` should be consistent with the device where `predictor` is defined.\n device: device for the stitched output prediction.\n By default the device (and accordingly the memory) of the `inputs` is used. If for example\n set to device=torch.device('cpu') the gpu memory consumption is less and independent of the\n `inputs` and `roi_size`. Output is on the `device`.\n progress: whether to print a `tqdm` progress bar.\n roi_weight_map: pre-computed (non-negative) weight map for each ROI.\n If not given, and ``mode`` is not `constant`, this map will be computed on the fly.\n args: optional args to be passed to ``predictor``.\n kwargs: optional keyword args to be passed to ``predictor``.\n\n Note:\n - input must be channel-first and have a batch dim, supports N-D sliding window.\n\n \"\"\"\n print('sliding window inference for ROI')\n text = kwargs['text']\n use_box = kwargs['use_box']\n use_point = kwargs['use_point']\n assert not (use_box and use_point)\n compute_dtype = inputs.dtype\n num_spatial_dims = len(inputs.shape) - 2\n if overlap < 0 or overlap >= 1:\n raise ValueError(\"overlap must be >= 0 and < 1.\")\n\n # determine image spatial size and batch size\n # Note: all input images must have the same image size and batch size\n batch_size, _, *image_size_ = inputs.shape\n\n if device is None:\n device = inputs.device\n if sw_device is None:\n sw_device = inputs.device\n\n roi_size = fall_back_tuple(roi_size, image_size_)\n # in case that image size is smaller than roi size\n image_size = tuple(max(image_size_[i], roi_size[i]) for i in range(num_spatial_dims))\n pad_size = []\n for k in range(len(inputs.shape) - 1, 1, -1):\n diff = max(roi_size[k - 2] - inputs.shape[k], 0)\n half = diff // 2\n pad_size.extend([half, diff - half])\n inputs = F.pad(inputs, pad=pad_size, mode=look_up_option(padding_mode, PytorchPadMode).value, value=cval)\n #############\n if use_point or use_box:\n binary_prompt_map, global_preds = prompt_reflection\n global_preds = F.pad(global_preds, pad=pad_size, mode=look_up_option(padding_mode, PytorchPadMode).value, value=cval)\n #############\n scan_interval = _get_scan_interval(image_size, roi_size, num_spatial_dims, overlap)\n\n # Store all slices in list\n slices = dense_patch_slices(image_size, roi_size, scan_interval)\n num_win = len(slices) # number of windows per image\n total_slices = num_win * batch_size # total number of windows\n\n # Create window-level importance map\n valid_patch_size = get_valid_patch_size(image_size, roi_size)\n if valid_patch_size == roi_size and (roi_weight_map is not None):\n importance_map = roi_weight_map\n 
else:\n try:\n importance_map = compute_importance_map(valid_patch_size, mode=mode, sigma_scale=sigma_scale, device=device)\n except BaseException as e:\n raise RuntimeError(\n \"Seems to be OOM. Please try smaller patch size or mode='constant' instead of mode='gaussian'.\"\n ) from e\n importance_map = convert_data_type(importance_map, torch.Tensor, device, compute_dtype)[0] # type: ignore\n # handle non-positive weights\n min_non_zero = max(importance_map[importance_map != 0].min().item(), 1e-3)\n importance_map = torch.clamp(importance_map.to(torch.float32), min=min_non_zero).to(compute_dtype)\n\n # Perform predictions\n dict_key, output_image_list, count_map_list = None, [], []\n _initialized_ss = -1\n is_tensor_output = True # whether the predictor's output is a tensor (instead of dict/tuple)\n\n # for each patch\n for slice_g in tqdm(range(0, total_slices, sw_batch_size)) if progress else range(0, total_slices, sw_batch_size):\n slice_range = range(slice_g, min(slice_g + sw_batch_size, total_slices))\n unravel_slice = [\n [slice(int(idx / num_win), int(idx / num_win) + 1), slice(None)] + list(slices[idx % num_win])\n for idx in slice_range\n ]\n window_data = torch.cat([inputs[win_slice] for win_slice in unravel_slice]).to(sw_device)\n #############\n \n boxes = None\n points = None\n if use_point:\n window_binary_prompt_map = torch.cat([binary_prompt_map[win_slice] for win_slice in unravel_slice]).to(sw_device)\n point, point_label = select_points(window_binary_prompt_map.squeeze())\n points = (point.unsqueeze(0).float().cuda(), point_label.unsqueeze(0).float().cuda()) \n pseudo_label = torch.cat([global_preds[win_slice] for win_slice in unravel_slice]).to(sw_device)\n boxes = generate_box(pseudo_label.squeeze()).unsqueeze(0).float().cuda()\n if use_box:\n if num_win == 1:\n window_binary_prompt_map = torch.cat([binary_prompt_map[win_slice] for win_slice in unravel_slice]).to(sw_device)\n boxes = generate_box(window_binary_prompt_map.squeeze()).unsqueeze(0).float().cuda()\n else:\n pseudo_label = torch.cat([global_preds[win_slice] for win_slice in unravel_slice]).to(sw_device)\n boxes = generate_box(pseudo_label.squeeze()).unsqueeze(0).float().cuda()\n seg_prob_out = predictor(window_data, text, boxes, points) # batched patch segmentation\n #############\n # convert seg_prob_out to tuple seg_prob_tuple, this does not allocate new memory.\n seg_prob_tuple: Tuple[torch.Tensor, ...]\n if isinstance(seg_prob_out, torch.Tensor):\n seg_prob_tuple = (seg_prob_out,)\n elif isinstance(seg_prob_out, Mapping):\n if dict_key is None:\n dict_key = sorted(seg_prob_out.keys()) # track predictor's output keys\n seg_prob_tuple = tuple(seg_prob_out[k] for k in dict_key)\n is_tensor_output = False\n else:\n seg_prob_tuple = ensure_tuple(seg_prob_out)\n is_tensor_output = False\n\n # for each output in multi-output list\n for ss, seg_prob in enumerate(seg_prob_tuple):\n seg_prob = seg_prob.to(device) # BxCxMxNxP or BxCxMxN\n\n # compute zoom scale: out_roi_size/in_roi_size\n zoom_scale = []\n for axis, (img_s_i, out_w_i, in_w_i) in enumerate(\n zip(image_size, seg_prob.shape[2:], window_data.shape[2:])\n ):\n _scale = out_w_i / float(in_w_i)\n if not (img_s_i * _scale).is_integer():\n warnings.warn(\n f\"For spatial axis: {axis}, output[{ss}] will have non-integer shape. Spatial \"\n f\"zoom_scale between output[{ss}] and input is {_scale}. Please pad inputs.\"\n )\n zoom_scale.append(_scale)\n\n if _initialized_ss < ss: # init. 
the ss-th buffer at the first iteration\n # construct multi-resolution outputs\n output_classes = seg_prob.shape[1]\n output_shape = [batch_size, output_classes] + [\n int(image_size_d * zoom_scale_d) for image_size_d, zoom_scale_d in zip(image_size, zoom_scale)\n ]\n # allocate memory to store the full output and the count for overlapping parts\n output_image_list.append(torch.zeros(output_shape, dtype=compute_dtype, device=device))\n count_map_list.append(torch.zeros([1, 1] + output_shape[2:], dtype=compute_dtype, device=device))\n _initialized_ss += 1\n\n # resizing the importance_map\n resizer = Resize(spatial_size=seg_prob.shape[2:], mode=\"nearest\", anti_aliasing=False)\n\n # store the result in the proper location of the full output. Apply weights from importance map.\n for idx, original_idx in zip(slice_range, unravel_slice):\n # zoom roi\n original_idx_zoom = list(original_idx) # 4D for 2D image, 5D for 3D image\n for axis in range(2, len(original_idx_zoom)):\n zoomed_start = original_idx[axis].start * zoom_scale[axis - 2]\n zoomed_end = original_idx[axis].stop * zoom_scale[axis - 2]\n if not zoomed_start.is_integer() or (not zoomed_end.is_integer()):\n warnings.warn(\n f\"For axis-{axis-2} of output[{ss}], the output roi range is not int. \"\n f\"Input roi range is ({original_idx[axis].start}, {original_idx[axis].stop}). \"\n f\"Spatial zoom_scale between output[{ss}] and input is {zoom_scale[axis - 2]}. \"\n f\"Corresponding output roi range is ({zoomed_start}, {zoomed_end}).\\n\"\n f\"Please change overlap ({overlap}) or roi_size ({roi_size[axis-2]}) for axis-{axis-2}. \"\n \"Tips: if overlap*roi_size*zoom_scale is an integer, it usually works.\"\n )\n original_idx_zoom[axis] = slice(int(zoomed_start), int(zoomed_end), None)\n importance_map_zoom = resizer(importance_map.unsqueeze(0))[0].to(compute_dtype)\n # store results and weights\n output_image_list[ss][original_idx_zoom] += importance_map_zoom * seg_prob[idx - slice_g]\n count_map_list[ss][original_idx_zoom] += (\n importance_map_zoom.unsqueeze(0).unsqueeze(0).expand(count_map_list[ss][original_idx_zoom].shape)\n )\n\n # account for any overlapping sections\n for ss in range(len(output_image_list)):\n output_image_list[ss] = (output_image_list[ss] / count_map_list.pop(0)).to(compute_dtype)\n\n # remove padding if image_size smaller than roi_size\n for ss, output_i in enumerate(output_image_list):\n if torch.isnan(output_i).any() or torch.isinf(output_i).any():\n warnings.warn(\"Sliding window inference results contain NaN or Inf.\")\n\n zoom_scale = [\n seg_prob_map_shape_d / roi_size_d for seg_prob_map_shape_d, roi_size_d in zip(output_i.shape[2:], roi_size)\n ]\n\n final_slicing: List[slice] = []\n for sp in range(num_spatial_dims):\n slice_dim = slice(pad_size[sp * 2], image_size_[num_spatial_dims - sp - 1] + pad_size[sp * 2])\n slice_dim = slice(\n int(round(slice_dim.start * zoom_scale[num_spatial_dims - sp - 1])),\n int(round(slice_dim.stop * zoom_scale[num_spatial_dims - sp - 1])),\n )\n final_slicing.insert(0, slice_dim)\n while len(final_slicing) < len(output_i.shape):\n final_slicing.insert(0, slice(None))\n output_image_list[ss] = output_i[final_slicing]\n\n if dict_key is not None: # if output of predictor is a dict\n final_output = dict(zip(dict_key, output_image_list))\n else:\n final_output = tuple(output_image_list) # type: ignore\n return final_output[0] if is_tensor_output else final_output # type: ignore" }, { "identifier": "generate_box", "path": "utils/monai_inferers_utils.py", "snippet": "def 
generate_box(pred_pre, bbox_shift=None):\n meaning_post_label = pred_pre # [h, w, d]\n ones_idx = (meaning_post_label > 0).nonzero(as_tuple=True)\n if all(tensor.nelement() == 0 for tensor in ones_idx):\n bboxes = torch.tensor([-1,-1,-1,-1,-1,-1])\n # print(bboxes, bboxes.shape)\n return bboxes\n min_coords = [dim.min() for dim in ones_idx] # [x_min, y_min, z_min]\n max_coords = [dim.max() for dim in ones_idx] # [x_max, y_max, z_max]\n\n\n if bbox_shift is None:\n corner_min = []\n corner_max = []\n shape = meaning_post_label.shape\n for coor in min_coords:\n coor_ = max(0, coor)\n corner_min.append(coor_)\n for idx, coor in enumerate(max_coords):\n coor_ = min(shape[idx], coor)\n corner_max.append(coor_)\n corner_min = torch.tensor(corner_min)\n corner_max = torch.tensor(corner_max)\n return torch.cat((corner_min, corner_max), dim=0)\n else:\n # add perturbation to bounding box coordinates\n corner_min = []\n corner_max = []\n shape = meaning_post_label.shape\n for coor in min_coords:\n coor_ = max(0, coor + random.randint(-bbox_shift, bbox_shift))\n corner_min.append(coor_)\n for idx, coor in enumerate(max_coords):\n coor_ = min(shape[idx], coor + random.randint(-bbox_shift, bbox_shift))\n corner_max.append(coor_)\n corner_min = torch.tensor(corner_min)\n corner_max = torch.tensor(corner_max)\n return torch.cat((corner_min, corner_max), dim=0)" }, { "identifier": "select_points", "path": "utils/monai_inferers_utils.py", "snippet": "def select_points(preds, num_positive_extra=4, num_negative_extra=0, fix_extra_point_num=None):\n spacial_dim = 3\n points = torch.zeros((0, 3))\n labels = torch.zeros((0))\n pos_thred = 0.9\n neg_thred = 0.1\n \n # get pos/net indices\n positive_indices = torch.nonzero(preds > pos_thred, as_tuple=True) # ([pos x], [pos y], [pos z])\n negative_indices = torch.nonzero(preds < neg_thred, as_tuple=True)\n\n ones_idx = (preds > pos_thred).nonzero(as_tuple=True)\n if all(tmp.nelement() == 0 for tmp in ones_idx):\n # all neg\n num_positive_extra = 0\n selected_positive_point = torch.tensor([-1,-1,-1]).unsqueeze(dim=0)\n points = torch.cat((points, selected_positive_point), dim=0)\n labels = torch.cat((labels, torch.tensor([-1]).reshape(1)))\n else:\n # random select a pos point\n random_idx = torch.randint(len(positive_indices[0]), (1,))\n selected_positive_point = torch.tensor([positive_indices[i][random_idx] for i in range(spacial_dim)]).unsqueeze(dim=0)\n points = torch.cat((points, selected_positive_point), dim=0)\n labels = torch.cat((labels, torch.ones((1))))\n\n if num_positive_extra > 0:\n pos_idx_list = torch.randperm(len(positive_indices[0]))[:num_positive_extra]\n extra_positive_points = []\n for pos_idx in pos_idx_list:\n extra_positive_points.append([positive_indices[i][pos_idx] for i in range(spacial_dim)])\n extra_positive_points = torch.tensor(extra_positive_points).reshape(-1, 3)\n points = torch.cat((points, extra_positive_points), dim=0)\n labels = torch.cat((labels, torch.ones((extra_positive_points.shape[0]))))\n\n if num_negative_extra > 0:\n neg_idx_list = torch.randperm(len(negative_indices[0]))[:num_negative_extra]\n extra_negative_points = []\n for neg_idx in neg_idx_list:\n extra_negative_points.append([negative_indices[i][neg_idx] for i in range(spacial_dim)])\n extra_negative_points = torch.tensor(extra_negative_points).reshape(-1, 3)\n points = torch.cat((points, extra_negative_points), dim=0)\n labels = torch.cat((labels, torch.zeros((extra_negative_points.shape[0]))))\n # print('extra_negative_points ', extra_negative_points, 
extra_negative_points.shape)\n # print('==> points ', points.shape, labels)\n \n if fix_extra_point_num is None:\n left_point_num = num_positive_extra + num_negative_extra + 1 - labels.shape[0]\n else:\n left_point_num = fix_extra_point_num + 1 - labels.shape[0]\n\n for _ in range(left_point_num):\n ignore_point = torch.tensor([-1,-1,-1]).unsqueeze(dim=0)\n points = torch.cat((points, ignore_point), dim=0)\n labels = torch.cat((labels, torch.tensor([-1]).reshape(1)))\n\n return (points, labels)" }, { "identifier": "build_binary_cube", "path": "utils/monai_inferers_utils.py", "snippet": "def build_binary_cube(bbox, binary_cube_shape):\n min_coord = bbox[0][:3].int().tolist()\n max_coord = bbox[0][3:].int().tolist()\n binary_cube = torch.zeros(binary_cube_shape)\n binary_cube[min_coord[0]:max_coord[0]+1, min_coord[1]:max_coord[1]+1, min_coord[2]:max_coord[2]+1] = 1\n return binary_cube" }, { "identifier": "build_binary_points", "path": "utils/monai_inferers_utils.py", "snippet": "def build_binary_points(points, labels, shape):\n binary_points = torch.zeros(shape, dtype=torch.int16)\n binary_points[points[labels == 1, 0].long(), points[labels == 1, 1].long(), points[labels == 1, 2].long()] = 1\n return binary_points" }, { "identifier": "logits2roi_coor", "path": "utils/monai_inferers_utils.py", "snippet": "def logits2roi_coor(spatial_size, logits_global_single):\n # crop predict\n pred_global_single = torch.sigmoid(logits_global_single) > 0.5\n ## get all pos idx\n nonzero_indices = torch.nonzero(pred_global_single)\n if nonzero_indices.shape[0] == 0:\n return None, None, None, None, None, None\n ## get boundary\n min_d, max_d = nonzero_indices[:, 0].min(), nonzero_indices[:, 0].max()\n min_h, max_h = nonzero_indices[:, 1].min(), nonzero_indices[:, 1].max()\n min_w, max_w = nonzero_indices[:, 2].min(), nonzero_indices[:, 2].max()\n ## padding\n crop_d, crop_h, crop_w = max_d - min_d + 1, max_h - min_h + 1, max_w - min_w + 1,\n window_d, window_h, window_w = spatial_size\n padding_d, padding_h, padding_w = max(0, window_d-crop_d), max(0, window_h-crop_h), max(0, window_w-crop_w)\n global_d, global_h, global_w = logits_global_single.shape\n min_d = max(0, min_d - int(padding_d)//2)\n min_h = max(0, min_h - int(padding_h)//2)\n min_w = max(0, min_w - int(padding_w)//2)\n max_d = min(global_d, max_d + int(padding_d)//2)\n max_h = min(global_h, max_h + int(padding_h)//2)\n max_w = min(global_w, max_w + int(padding_w)//2)\n return min_d, min_h, min_w, max_d, max_h, max_w" }, { "identifier": "draw_result", "path": "utils/visualize.py", "snippet": "def draw_result(category, image, bboxes, points, logits, gt3D, spatial_size, work_dir):\n zoom_out_transform = transforms.Compose([\n transforms.AddChanneld(keys=[\"image\", \"label\", \"logits\"]),\n transforms.Resized(keys=[\"image\", \"label\", \"logits\"], spatial_size=spatial_size, mode='nearest-exact')\n ])\n post_item = zoom_out_transform({\n 'image': image,\n 'label': gt3D,\n 'logits': logits\n })\n image, gt3D, logits = post_item['image'][0], post_item['label'][0], post_item['logits'][0]\n preds = torch.sigmoid(logits)\n preds = (preds > 0.5).int()\n\n root_dir=os.path.join(work_dir, f'fig_examples/{category}/') \n\n if not os.path.exists(root_dir):\n os.makedirs(root_dir)\n if bboxes is not None:\n x1, y1, z1, x2, y2, z2 = bboxes[0].cpu().numpy()\n if points is not None:\n points = (points[0].cpu().numpy(), points[1].cpu().numpy())\n points_ax = points[0][0] # [n, 3]\n points_label = points[1][0] # [n]\n\n for j in range(image.shape[0]):\n img_2d 
= image[j, :, :].detach().cpu().numpy()\n preds_2d = preds[j, :, :].detach().cpu().numpy()\n label_2d = gt3D[j, :, :].detach().cpu().numpy()\n if np.sum(label_2d) == 0 or np.sum(preds_2d) == 0:\n continue\n\n img_2d = img_2d * 255\n # orginal img\n fig, (ax1, ax2, ax3) = plt.subplots(1, 3)\n ax1.imshow(img_2d, cmap='gray')\n ax1.set_title('Image with prompt') \n ax1.axis('off')\n\n # gt\n ax2.imshow(img_2d, cmap='gray')\n show_mask(label_2d, ax2)\n ax2.set_title('Ground truth') \n ax2.axis('off')\n\n # preds\n ax3.imshow(img_2d, cmap='gray')\n show_mask(preds_2d, ax3)\n ax3.set_title('Prediction') \n ax3.axis('off')\n\n # boxes\n if bboxes is not None:\n if j >= x1 and j <= x2:\n show_box((z1, y1, z2, y2), ax1)\n # points\n if points is not None:\n for point_idx in range(points_label.shape[0]):\n point = points_ax[point_idx]\n label = points_label[point_idx] # [1]\n if j == point[0]:\n show_points(point, label, ax1)\n \n fig.subplots_adjust(left=0, right=1, bottom=0, top=1, wspace=0, hspace=0)\n plt.savefig(os.path.join(root_dir, f'{category}_{j}.png'), bbox_inches='tight')\n plt.close()" } ]
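As the sliding_window_inference docstring above notes, mode="gaussian" down-weights predictions near window edges so that overlapping patches blend without visible seams. Below is a minimal standalone 1-D sketch of that weighting (not taken from the repository; sigma_scale follows the default described in the docstring, and the max-normalisation is a simplifying assumption):

import torch

def gaussian_window_1d(size: int, sigma_scale: float = 0.125) -> torch.Tensor:
    # sigma is a fraction of the window size, as in the docstring above
    sigma = sigma_scale * size
    center = (size - 1) / 2.0
    coords = torch.arange(size, dtype=torch.float32)
    weights = torch.exp(-((coords - center) ** 2) / (2 * sigma ** 2))
    return weights / weights.max()  # normalised so the centre position has weight 1

print(gaussian_window_1d(8))
# weights peak at the window centre and taper toward the edges, so when
# overlapping windows are accumulated and divided by the count map, border
# predictions contribute less than centre predictions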
import argparse
import os
import torch
import torch.nn.functional as F
import json
import monai.transforms as transforms
from segment_anything_volumetric import sam_model_registry
from network.model import SegVol
from data_process.demo_data_process import process_ct_gt
from utils.monai_inferers_utils import sliding_window_inference, generate_box, select_points, build_binary_cube, build_binary_points, logits2roi_coor
from utils.visualize import draw_result
11,422
# generate prompts text_single = categories[item_idx] if args.use_text_prompt else None if categories is not None: print(f'inference |{categories[item_idx]}| target...') points_single = None box_single = None if args.use_point_prompt: point, point_label = select_points(label_single_resize, num_positive_extra=3, num_negative_extra=3) points_single = (point.unsqueeze(0).float().cuda(), point_label.unsqueeze(0).float().cuda()) binary_points_resize = build_binary_points(point, point_label, label_single_resize.shape) if args.use_box_prompt: box_single = generate_box(label_single_resize).unsqueeze(0).float().cuda() binary_cube_resize = build_binary_cube(box_single, binary_cube_shape=label_single_resize.shape) #################### # zoom-out inference: print('--- zoom out inference ---') print(f'use text-prompt [{text_single!=None}], use box-prompt [{box_single!=None}], use point-prompt [{points_single!=None}]') with torch.no_grad(): logits_global_single = segvol_model(image_single_resize.cuda(), text=text_single, boxes=box_single, points=points_single) # resize back global logits logits_global_single = F.interpolate( logits_global_single.cpu(), size=ori_shape, mode='nearest')[0][0] # build prompt reflection for zoom-in if args.use_point_prompt: binary_points = F.interpolate( binary_points_resize.unsqueeze(0).unsqueeze(0).float(), size=ori_shape, mode='nearest')[0][0] if args.use_box_prompt: binary_cube = F.interpolate( binary_cube_resize.unsqueeze(0).unsqueeze(0).float(), size=ori_shape, mode='nearest')[0][0] zoom_out_dice = dice_score(logits_global_single.squeeze(), label_single.squeeze()) logits_labels_record[categories[item_idx]] = ( zoom_out_dice, image_single, points_single, box_single, logits_global_single, label_single) print(f'zoom out inference done with zoom_out_dice: {zoom_out_dice:.4f}') if not args.use_zoom_in: continue #################### # zoom-in inference: min_d, min_h, min_w, max_d, max_h, max_w = logits2roi_coor(args.spatial_size, logits_global_single) if min_d is None: print('Fail to detect foreground!') continue # Crop roi image_single_cropped = image_single[min_d:max_d+1, min_h:max_h+1, min_w:max_w+1].unsqueeze(0).unsqueeze(0) global_preds = (torch.sigmoid(logits_global_single[min_d:max_d+1, min_h:max_h+1, min_w:max_w+1])>0.5).long() assert not (args.use_box_prompt and args.use_point_prompt) prompt_reflection = None if args.use_box_prompt: binary_cube_cropped = binary_cube[min_d:max_d+1, min_h:max_h+1, min_w:max_w+1] prompt_reflection = ( binary_cube_cropped.unsqueeze(0).unsqueeze(0), global_preds.unsqueeze(0).unsqueeze(0) ) if args.use_point_prompt: binary_points_cropped = binary_points[min_d:max_d+1, min_h:max_h+1, min_w:max_w+1] prompt_reflection = ( binary_points_cropped.unsqueeze(0).unsqueeze(0), global_preds.unsqueeze(0).unsqueeze(0) ) ## inference with torch.no_grad(): logits_single_cropped = sliding_window_inference( image_single_cropped.cuda(), prompt_reflection, args.spatial_size, 1, segvol_model, args.infer_overlap, text=text_single, use_box=args.use_box_prompt, use_point=args.use_point_prompt, ) logits_single_cropped = logits_single_cropped.cpu().squeeze() logits_global_single[min_d:max_d+1, min_h:max_h+1, min_w:max_w+1] = logits_single_cropped zoom_in_dice = dice_score(logits_global_single.squeeze(), label_single.squeeze()) logits_labels_record[categories[item_idx]] = ( zoom_in_dice, image_single, points_single, box_single, logits_global_single, label_single) print(f'===> zoom out dice {zoom_out_dice:.4f} -> zoom-out-zoom-in dice {zoom_in_dice:.4f} <===') 
return logits_labels_record def inference_single_ct(args, segvol_model, data_item, categories): segvol_model.eval() image, gt3D = data_item["image"].float(), data_item["label"] image_zoom_out, gt3D__zoom_out = data_item["zoom_out_image"].float(), data_item['zoom_out_label'] logits_labels_record = zoom_in_zoom_out( args, segvol_model, image.unsqueeze(0), image_zoom_out.unsqueeze(0), gt3D.unsqueeze(0), gt3D__zoom_out.unsqueeze(0), categories=categories) # visualize if args.visualize: for target, values in logits_labels_record.items(): dice_score, image, point_prompt, box_prompt, logits, labels = values print(f'{target} result with Dice score {dice_score:.4f} visualizing') draw_result(target + f"-Dice {dice_score:.4f}", image, box_prompt, point_prompt, logits, labels, args.spatial_size, args.work_dir) def main(args): gpu = 0 torch.cuda.set_device(gpu) # build model sam_model = sam_model_registry['vit'](args=args)
def set_parse(): # %% set up parser parser = argparse.ArgumentParser() parser.add_argument("--test_mode", default=True, type=bool) parser.add_argument("--resume", type = str, default = '') parser.add_argument("-infer_overlap", default=0.5, type=float, help="sliding window inference overlap") parser.add_argument("-spatial_size", default=(32, 256, 256), type=tuple) parser.add_argument("-patch_size", default=(4, 16, 16), type=tuple) parser.add_argument('-work_dir', type=str, default='./work_dir') ### demo parser.add_argument('--demo_config', type=str, required=True) parser.add_argument("--clip_ckpt", type = str, default = './config/clip') args = parser.parse_args() return args def dice_score(preds, labels): # on GPU assert preds.shape[0] == labels.shape[0], "predict & target batch size don't match\n" + str(preds.shape) + str(labels.shape) predict = preds.view(1, -1) target = labels.view(1, -1) if target.shape[1] < 1e8: predict = predict.cuda() target = target.cuda() predict = torch.sigmoid(predict) predict = torch.where(predict > 0.5, 1., 0.) tp = torch.sum(torch.mul(predict, target)) den = torch.sum(predict) + torch.sum(target) + 1 dice = 2 * tp / den if target.shape[1] < 1e8: predict = predict.cpu() target = target.cpu() return dice def zoom_in_zoom_out(args, segvol_model, image, image_resize, gt3D, gt3D_resize, categories=None): logits_labels_record = {} image_single_resize = image_resize image_single = image[0,0] ori_shape = image_single.shape for item_idx in range(len(categories)): # get label to generate prompts label_single = gt3D[0][item_idx] label_single_resize = gt3D_resize[0][item_idx] # skip meaningless categories if torch.sum(label_single) == 0: print('No object, skip') continue # generate prompts text_single = categories[item_idx] if args.use_text_prompt else None if categories is not None: print(f'inference |{categories[item_idx]}| target...') points_single = None box_single = None if args.use_point_prompt: point, point_label = select_points(label_single_resize, num_positive_extra=3, num_negative_extra=3) points_single = (point.unsqueeze(0).float().cuda(), point_label.unsqueeze(0).float().cuda()) binary_points_resize = build_binary_points(point, point_label, label_single_resize.shape) if args.use_box_prompt: box_single = generate_box(label_single_resize).unsqueeze(0).float().cuda() binary_cube_resize = build_binary_cube(box_single, binary_cube_shape=label_single_resize.shape) #################### # zoom-out inference: print('--- zoom out inference ---') print(f'use text-prompt [{text_single!=None}], use box-prompt [{box_single!=None}], use point-prompt [{points_single!=None}]') with torch.no_grad(): logits_global_single = segvol_model(image_single_resize.cuda(), text=text_single, boxes=box_single, points=points_single) # resize back global logits logits_global_single = F.interpolate( logits_global_single.cpu(), size=ori_shape, mode='nearest')[0][0] # build prompt reflection for zoom-in if args.use_point_prompt: binary_points = F.interpolate( binary_points_resize.unsqueeze(0).unsqueeze(0).float(), size=ori_shape, mode='nearest')[0][0] if args.use_box_prompt: binary_cube = F.interpolate( binary_cube_resize.unsqueeze(0).unsqueeze(0).float(), size=ori_shape, mode='nearest')[0][0] zoom_out_dice = dice_score(logits_global_single.squeeze(), label_single.squeeze()) logits_labels_record[categories[item_idx]] = ( zoom_out_dice, image_single, points_single, box_single, logits_global_single, label_single) print(f'zoom out inference done with zoom_out_dice: {zoom_out_dice:.4f}') if not 
args.use_zoom_in: continue #################### # zoom-in inference: min_d, min_h, min_w, max_d, max_h, max_w = logits2roi_coor(args.spatial_size, logits_global_single) if min_d is None: print('Fail to detect foreground!') continue # Crop roi image_single_cropped = image_single[min_d:max_d+1, min_h:max_h+1, min_w:max_w+1].unsqueeze(0).unsqueeze(0) global_preds = (torch.sigmoid(logits_global_single[min_d:max_d+1, min_h:max_h+1, min_w:max_w+1])>0.5).long() assert not (args.use_box_prompt and args.use_point_prompt) prompt_reflection = None if args.use_box_prompt: binary_cube_cropped = binary_cube[min_d:max_d+1, min_h:max_h+1, min_w:max_w+1] prompt_reflection = ( binary_cube_cropped.unsqueeze(0).unsqueeze(0), global_preds.unsqueeze(0).unsqueeze(0) ) if args.use_point_prompt: binary_points_cropped = binary_points[min_d:max_d+1, min_h:max_h+1, min_w:max_w+1] prompt_reflection = ( binary_points_cropped.unsqueeze(0).unsqueeze(0), global_preds.unsqueeze(0).unsqueeze(0) ) ## inference with torch.no_grad(): logits_single_cropped = sliding_window_inference( image_single_cropped.cuda(), prompt_reflection, args.spatial_size, 1, segvol_model, args.infer_overlap, text=text_single, use_box=args.use_box_prompt, use_point=args.use_point_prompt, ) logits_single_cropped = logits_single_cropped.cpu().squeeze() logits_global_single[min_d:max_d+1, min_h:max_h+1, min_w:max_w+1] = logits_single_cropped zoom_in_dice = dice_score(logits_global_single.squeeze(), label_single.squeeze()) logits_labels_record[categories[item_idx]] = ( zoom_in_dice, image_single, points_single, box_single, logits_global_single, label_single) print(f'===> zoom out dice {zoom_out_dice:.4f} -> zoom-out-zoom-in dice {zoom_in_dice:.4f} <===') return logits_labels_record def inference_single_ct(args, segvol_model, data_item, categories): segvol_model.eval() image, gt3D = data_item["image"].float(), data_item["label"] image_zoom_out, gt3D__zoom_out = data_item["zoom_out_image"].float(), data_item['zoom_out_label'] logits_labels_record = zoom_in_zoom_out( args, segvol_model, image.unsqueeze(0), image_zoom_out.unsqueeze(0), gt3D.unsqueeze(0), gt3D__zoom_out.unsqueeze(0), categories=categories) # visualize if args.visualize: for target, values in logits_labels_record.items(): dice_score, image, point_prompt, box_prompt, logits, labels = values print(f'{target} result with Dice score {dice_score:.4f} visualizing') draw_result(target + f"-Dice {dice_score:.4f}", image, box_prompt, point_prompt, logits, labels, args.spatial_size, args.work_dir) def main(args): gpu = 0 torch.cuda.set_device(gpu) # build model sam_model = sam_model_registry['vit'](args=args)
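The dice_score helper above drives both the zoom-out and zoom-in comparisons printed by this record's code. A tiny sanity check of the same overlap formula (a standalone sketch that skips the sigmoid step by passing already-binarised masks; the +1 smoothing term in the denominator matches the function above):

import torch

pred = torch.tensor([[0., 1., 1., 0.]])    # hypothetical thresholded prediction
target = torch.tensor([[0., 1., 0., 0.]])  # hypothetical ground-truth mask

tp = torch.sum(pred * target)              # true positives (intersection)
den = torch.sum(pred) + torch.sum(target) + 1
print(float(2 * tp / den))                 # 2*1 / (2 + 1 + 1) = 0.5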
segvol_model = SegVol(
1
2023-11-10 08:25:37+00:00
16k
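For reference, a toy illustration of the box prompt built by the generate_box helper from the record above: without bbox_shift it is simply the per-axis minimum and maximum coordinates of the non-zero voxels, concatenated into a 6-element tensor (standalone sketch, not part of the dataset record):

import torch

mask = torch.zeros(4, 5, 6)
mask[1:3, 2:4, 1:5] = 1                      # hypothetical binary segmentation
ones_idx = (mask > 0).nonzero(as_tuple=True)
corner_min = torch.tensor([dim.min() for dim in ones_idx])
corner_max = torch.tensor([dim.max() for dim in ones_idx])
print(torch.cat((corner_min, corner_max)))   # tensor([1, 2, 1, 2, 3, 4])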
theroyallab/tabbyAPI
main.py
[ { "identifier": "convert_args_to_dict", "path": "args.py", "snippet": "def convert_args_to_dict(args: argparse.Namespace, parser: argparse.ArgumentParser):\n \"\"\"Broad conversion of surface level arg groups to dictionaries\"\"\"\n\n arg_groups = {}\n for group in parser._action_groups:\n group_dict = {}\n for arg in group._group_actions:\n value = getattr(args, arg.dest, None)\n if value is not None:\n group_dict[arg.dest] = value\n\n arg_groups[group.title] = group_dict\n\n return arg_groups" }, { "identifier": "init_argparser", "path": "args.py", "snippet": "def init_argparser():\n \"\"\"Creates an argument parser that any function can use\"\"\"\n\n parser = argparse.ArgumentParser(\n epilog=\"These args are only for a subset of the config. \"\n + \"Please edit config.yml for all options!\"\n )\n add_network_args(parser)\n add_model_args(parser)\n add_logging_args(parser)\n add_config_args(parser)\n\n return parser" }, { "identifier": "check_admin_key", "path": "auth.py", "snippet": "def check_admin_key(x_admin_key: str = Header(None), authorization: str = Header(None)):\n \"\"\"Check if the admin key is valid.\"\"\"\n\n # Allow request if auth is disabled\n if DISABLE_AUTH:\n return\n\n if x_admin_key:\n if not AUTH_KEYS.verify_key(x_admin_key, \"admin_key\"):\n raise HTTPException(401, \"Invalid admin key\")\n return x_admin_key\n\n if authorization:\n split_key = authorization.split(\" \")\n if len(split_key) < 2:\n raise HTTPException(401, \"Invalid admin key\")\n if split_key[0].lower() != \"bearer\" or not AUTH_KEYS.verify_key(\n split_key[1], \"admin_key\"\n ):\n raise HTTPException(401, \"Invalid admin key\")\n return authorization\n\n raise HTTPException(401, \"Please provide an admin key\")" }, { "identifier": "check_api_key", "path": "auth.py", "snippet": "def check_api_key(x_api_key: str = Header(None), authorization: str = Header(None)):\n \"\"\"Check if the API key is valid.\"\"\"\n\n # Allow request if auth is disabled\n if DISABLE_AUTH:\n return\n\n if x_api_key:\n if not AUTH_KEYS.verify_key(x_api_key, \"api_key\"):\n raise HTTPException(401, \"Invalid API key\")\n return x_api_key\n\n if authorization:\n split_key = authorization.split(\" \")\n if len(split_key) < 2:\n raise HTTPException(401, \"Invalid API key\")\n if split_key[0].lower() != \"bearer\" or not AUTH_KEYS.verify_key(\n split_key[1], \"api_key\"\n ):\n raise HTTPException(401, \"Invalid API key\")\n\n return authorization\n\n raise HTTPException(401, \"Please provide an API key\")" }, { "identifier": "load_auth_keys", "path": "auth.py", "snippet": "def load_auth_keys(disable_from_config: bool):\n \"\"\"Load the authentication keys from api_tokens.yml. If the file does not\n exist, generate new keys and save them to api_tokens.yml.\"\"\"\n global AUTH_KEYS\n global DISABLE_AUTH\n\n DISABLE_AUTH = disable_from_config\n if disable_from_config:\n logger.warning(\n \"Disabling authentication makes your instance vulnerable. 
\"\n \"Set the `disable_auth` flag to False in config.yml if you \"\n \"want to share this instance with others.\"\n )\n\n return\n\n try:\n with open(\"api_tokens.yml\", \"r\", encoding=\"utf8\") as auth_file:\n auth_keys_dict = yaml.safe_load(auth_file)\n AUTH_KEYS = AuthKeys.model_validate(auth_keys_dict)\n except OSError:\n new_auth_keys = AuthKeys(\n api_key=secrets.token_hex(16), admin_key=secrets.token_hex(16)\n )\n AUTH_KEYS = new_auth_keys\n\n with open(\"api_tokens.yml\", \"w\", encoding=\"utf8\") as auth_file:\n yaml.safe_dump(AUTH_KEYS.model_dump(), auth_file, default_flow_style=False)\n\n logger.info(\n f\"Your API key is: {AUTH_KEYS.api_key}\\n\"\n f\"Your admin key is: {AUTH_KEYS.admin_key}\\n\\n\"\n \"If these keys get compromised, make sure to delete api_tokens.yml \"\n \"and restart the server. Have fun!\"\n )" }, { "identifier": "override_config_from_args", "path": "config.py", "snippet": "def override_config_from_args(args: dict):\n \"\"\"Overrides the config based on a dict representation of args\"\"\"\n\n config_override = unwrap(args.get(\"options\", {}).get(\"config\"))\n if config_override:\n logger.info(\"Attempting to override config.yml from args.\")\n read_config_from_file(pathlib.Path(config_override))\n return\n\n # Network config\n network_override = args.get(\"network\")\n if network_override:\n network_config = get_network_config()\n GLOBAL_CONFIG[\"network\"] = {**network_config, **network_override}\n\n # Model config\n model_override = args.get(\"model\")\n if model_override:\n model_config = get_model_config()\n GLOBAL_CONFIG[\"model\"] = {**model_config, **model_override}\n\n # Logging config\n logging_override = args.get(\"logging\")\n if logging_override:\n logging_config = get_gen_logging_config()\n GLOBAL_CONFIG[\"logging\"] = {\n **logging_config,\n **{k.replace(\"log_\", \"\"): logging_override[k] for k in logging_override},\n }" }, { "identifier": "read_config_from_file", "path": "config.py", "snippet": "def read_config_from_file(config_path: pathlib.Path):\n \"\"\"Sets the global config from a given file path\"\"\"\n global GLOBAL_CONFIG\n\n try:\n with open(str(config_path.resolve()), \"r\", encoding=\"utf8\") as config_file:\n GLOBAL_CONFIG = unwrap(yaml.safe_load(config_file), {})\n except Exception as exc:\n logger.error(\n \"The YAML config couldn't load because of the following error: \"\n f\"\\n\\n{exc}\"\n \"\\n\\nTabbyAPI will start anyway and not parse this config file.\"\n )\n GLOBAL_CONFIG = {}" }, { "identifier": "get_gen_logging_config", "path": "config.py", "snippet": "def get_gen_logging_config():\n \"\"\"Returns the generation logging config from the global config\"\"\"\n return unwrap(GLOBAL_CONFIG.get(\"logging\"), {})" }, { "identifier": "get_model_config", "path": "config.py", "snippet": "def get_model_config():\n \"\"\"Returns the model config from the global config\"\"\"\n return unwrap(GLOBAL_CONFIG.get(\"model\"), {})" }, { "identifier": "get_draft_model_config", "path": "config.py", "snippet": "def get_draft_model_config():\n \"\"\"Returns the draft model config from the global config\"\"\"\n model_config = unwrap(GLOBAL_CONFIG.get(\"model\"), {})\n return unwrap(model_config.get(\"draft\"), {})" }, { "identifier": "get_lora_config", "path": "config.py", "snippet": "def get_lora_config():\n \"\"\"Returns the lora config from the global config\"\"\"\n model_config = unwrap(GLOBAL_CONFIG.get(\"model\"), {})\n return unwrap(model_config.get(\"lora\"), {})" }, { "identifier": "get_network_config", "path": "config.py", 
"snippet": "def get_network_config():\n \"\"\"Returns the network config from the global config\"\"\"\n return unwrap(GLOBAL_CONFIG.get(\"network\"), {})" }, { "identifier": "call_with_semaphore", "path": "generators.py", "snippet": "async def call_with_semaphore(callback: partialmethod):\n if inspect.iscoroutinefunction(callback):\n return await callback()\n async with generate_semaphore:\n return callback()" }, { "identifier": "generate_with_semaphore", "path": "generators.py", "snippet": "async def generate_with_semaphore(generator: AsyncGenerator):\n \"\"\"Generate with a semaphore.\"\"\"\n async with generate_semaphore:\n if inspect.isasyncgenfunction:\n async for result in generator():\n yield result\n else:\n for result in generator():\n yield result" }, { "identifier": "ModelContainer", "path": "model.py", "snippet": "class ModelContainer:\n \"\"\"The model container class for ExLlamaV2 models.\"\"\"\n\n config: Optional[ExLlamaV2Config] = None\n draft_config: Optional[ExLlamaV2Config] = None\n model: Optional[ExLlamaV2] = None\n draft_model: Optional[ExLlamaV2] = None\n cache: Optional[ExLlamaV2Cache] = None\n draft_cache: Optional[ExLlamaV2Cache] = None\n tokenizer: Optional[ExLlamaV2Tokenizer] = None\n generator: Optional[ExLlamaV2StreamingGenerator] = None\n prompt_template: Optional[PromptTemplate] = None\n\n cache_fp8: bool = False\n gpu_split_auto: bool = True\n gpu_split: Optional[list] = None\n use_cfg: bool = False\n\n active_loras: List[ExLlamaV2Lora] = []\n\n def __init__(self, model_directory: pathlib.Path, quiet=False, **kwargs):\n \"\"\"\n Create model container\n\n Args:\n model_dir (int): Model directory containing config.json,\n tokenizer.model etc.\n quiet (bool): Suppress console output\n load_progress_callback (function, optional): A function to call for\n each module loaded. Prototype:\n def progress(loaded_modules: int, total_modules: int,\n loading_draft: bool)\n **kwargs:\n `cache_mode` (str): Sets cache mode, \"FP16\" or \"FP8\"\n (defaulf: \"FP16\")\n 'max_seq_len' (int): Override model's default max sequence\n length (default: 4096)\n 'rope_scale' (float): Set RoPE scaling factor for model\n (default: 1.0)\n 'rope_alpha' (float): Set RoPE alpha (NTK) factor for model\n (default: 1.0)\n 'prompt_template' (str): Manually sets the prompt template for\n this model (default: None)\n 'chunk_size' (int): Sets the maximum chunk size for the model\n (default: 2048)\n Inferencing in chunks reduces overall VRAM overhead by\n processing very long sequences in smaller batches. This\n limits the size of temporary buffers needed for the hidden\n state and attention weights.\n 'draft_model_dir' (str): Draft model directory\n 'draft_rope_scale' (float): Set RoPE scaling factor for draft\n model (default: 1.0)\n 'draft_rope_alpha' (float): RoPE alpha (NTK) factor for draft\n model. By default, the draft model's alpha value is\n calculated automatically to scale to the size of the\n full model.\n 'lora_dir' (str): LoRA directory\n 'loras' (list[dict]): List of loras to be loaded, consisting of\n 'name' and 'scaling'\n 'gpu_split_auto' (bool): Automatically split model across\n available devices (default: True)\n 'gpu_split' (list[float]): Allocation for weights and (some)\n tensors, per device\n 'no_flash_attn' (bool): Turns off flash attention\n (increases vram usage) (default: False)\n 'use_cfg\" (bool): Enables CFG support. 
Disables flash attention\n (default: False)\n \"\"\"\n\n self.quiet = quiet\n\n self.cache_fp8 = \"cache_mode\" in kwargs and kwargs[\"cache_mode\"] == \"FP8\"\n self.gpu_split = kwargs.get(\"gpu_split\")\n self.gpu_split_auto = unwrap(kwargs.get(\"gpu_split_auto\"), True)\n\n self.config = ExLlamaV2Config()\n self.config.model_dir = str(model_directory.resolve())\n\n # Make the max seq len 4096 before preparing the config\n # This is a better default than 2038\n self.config.max_seq_len = 4096\n self.config.prepare()\n\n # Then override the base_seq_len if present\n override_base_seq_len = kwargs.get(\"override_base_seq_len\")\n if override_base_seq_len:\n self.config.max_seq_len = override_base_seq_len\n\n # Grab the base model's sequence length before overrides for\n # rope calculations\n base_seq_len = self.config.max_seq_len\n\n # Set the target seq len if present\n target_max_seq_len = kwargs.get(\"max_seq_len\")\n if target_max_seq_len:\n self.config.max_seq_len = target_max_seq_len\n\n # Set the rope scale\n self.config.scale_pos_emb = unwrap(\n kwargs.get(\"rope_scale\"), self.config.scale_pos_emb\n )\n\n # Automatically calculate rope alpha\n self.config.scale_alpha_value = unwrap(\n kwargs.get(\"rope_alpha\"), self.calculate_rope_alpha(base_seq_len)\n )\n\n if hasattr(ExLlamaV2Sampler.Settings, \"cfg_scale\"):\n self.use_cfg = unwrap(kwargs.get(\"use_cfg\"), False)\n else:\n logger.warning(\n \"CFG is not supported by the currently installed ExLlamaV2 version.\"\n )\n\n # Turn off flash attention if CFG is on\n # Workaround until batched FA2 is fixed in exllamav2 upstream\n self.config.no_flash_attn = (\n True if self.use_cfg else unwrap(kwargs.get(\"no_flash_attention\"), False)\n )\n\n # low_mem is currently broken in exllamav2. Don't use it until it's\n # fixed.\n \"\"\"\n if \"low_mem\" in kwargs and kwargs[\"low_mem\"]:\n self.config.set_low_mem()\n \"\"\"\n\n # Set prompt template override if provided\n prompt_template_name = kwargs.get(\"prompt_template\")\n if prompt_template_name:\n logger.info(\"Loading prompt template with name \" f\"{prompt_template_name}\")\n # Read the template\n self.prompt_template = get_template_from_file(prompt_template_name)\n else:\n # Then try finding the template from the tokenizer_config.json\n self.prompt_template = get_template_from_model_json(\n pathlib.Path(self.config.model_dir) / \"tokenizer_config.json\",\n \"chat_template\",\n \"from_tokenizer_config\",\n )\n\n # Try finding the chat template from the model's config.json\n # TODO: This may not even be used with huggingface models,\n # mark for removal.\n if self.prompt_template is None:\n self.prompt_template = get_template_from_model_json(\n pathlib.Path(self.config.model_config),\n \"chat_template\",\n \"from_model_config\",\n )\n\n # If that fails, attempt fetching from model name\n if self.prompt_template is None:\n template_match = find_template_from_model(model_directory)\n if template_match:\n self.prompt_template = get_template_from_file(template_match)\n\n # Catch all for template lookup errors\n if self.prompt_template:\n logger.info(\n f\"Using template {self.prompt_template.name} \" \"for chat completions.\"\n )\n else:\n logger.warning(\n \"Chat completions are disabled because a prompt \"\n \"template wasn't provided or auto-detected.\"\n )\n\n # Set num of experts per token if provided\n num_experts_override = kwargs.get(\"num_experts_per_token\")\n if num_experts_override:\n if hasattr(self.config, \"num_experts_per_token\"):\n self.config.num_experts_per_token = 
num_experts_override\n else:\n logger.warning(\n \"MoE experts per token override is not \"\n \"supported by the current ExLlamaV2 version.\"\n )\n\n chunk_size = min(\n unwrap(kwargs.get(\"chunk_size\"), 2048), self.config.max_seq_len\n )\n self.config.max_input_len = chunk_size\n self.config.max_attn_size = chunk_size**2\n\n draft_args = unwrap(kwargs.get(\"draft\"), {})\n draft_model_name = draft_args.get(\"draft_model_name\")\n enable_draft = draft_args and draft_model_name\n\n # Always disable draft if params are incorrectly configured\n if draft_args and draft_model_name is None:\n logger.warning(\n \"Draft model is disabled because a model name \"\n \"wasn't provided. Please check your config.yml!\"\n )\n enable_draft = False\n\n if enable_draft:\n self.draft_config = ExLlamaV2Config()\n draft_model_path = pathlib.Path(\n unwrap(draft_args.get(\"draft_model_dir\"), \"models\")\n )\n draft_model_path = draft_model_path / draft_model_name\n\n self.draft_config.model_dir = str(draft_model_path.resolve())\n self.draft_config.prepare()\n\n self.draft_config.scale_pos_emb = unwrap(\n draft_args.get(\"draft_rope_scale\"), 1.0\n )\n\n # Automatically calculate draft rope alpha\n self.draft_config.scale_alpha_value = unwrap(\n draft_args.get(\"draft_rope_alpha\"),\n self.calculate_rope_alpha(self.draft_config.max_seq_len),\n )\n self.draft_config.max_seq_len = self.config.max_seq_len\n\n if \"chunk_size\" in kwargs:\n self.draft_config.max_input_len = kwargs[\"chunk_size\"]\n self.draft_config.max_attn_size = kwargs[\"chunk_size\"] ** 2\n\n def calculate_rope_alpha(self, base_seq_len):\n \"\"\"Calculate the rope alpha value for a given sequence length.\"\"\"\n ratio = self.config.max_seq_len / base_seq_len\n\n # Default to a 1 alpha if the sequence length is ever less\n # than or equal to 1\n if ratio <= 1.0:\n alpha = 1\n else:\n alpha = -0.13436 + 0.80541 * ratio + 0.28833 * ratio**2\n return alpha\n\n def get_model_path(self, is_draft: bool = False):\n \"\"\"Get the path for this model.\"\"\"\n model_path = pathlib.Path(\n self.draft_config.model_dir if is_draft else self.config.model_dir\n )\n return model_path\n\n def load(self, progress_callback=None):\n \"\"\"\n Load model\n\n Args:\n progress_callback (function, optional): A function to call for each\n module loaded. Prototype:\n def progress(loaded_modules: int, total_modules: int)\n \"\"\"\n for _ in self.load_gen(progress_callback):\n pass\n\n def load_loras(self, lora_directory: pathlib.Path, **kwargs):\n \"\"\"\n Load loras\n \"\"\"\n\n loras = unwrap(kwargs.get(\"loras\"), [])\n success: List[str] = []\n failure: List[str] = []\n\n for lora in loras:\n lora_name = lora.get(\"name\")\n lora_scaling = unwrap(lora.get(\"scaling\"), 1.0)\n\n if lora_name is None:\n logger.warning(\n \"One of your loras does not have a name. Please check your \"\n \"config.yml! 
Skipping lora load.\"\n )\n failure.append(lora_name)\n continue\n\n logger.info(f\"Loading lora: {lora_name} at scaling {lora_scaling}\")\n lora_path = lora_directory / lora_name\n # FIXME(alpin): Does self.model need to be passed here?\n self.active_loras.append(\n ExLlamaV2Lora.from_directory(self.model, lora_path, lora_scaling)\n )\n logger.info(f\"Lora successfully loaded: {lora_name}\")\n success.append(lora_name)\n\n # Return success and failure names\n return {\"success\": success, \"failure\": failure}\n\n def load_gen(self, progress_callback=None):\n \"\"\"\n Load model, generator function\n\n Args:\n progress_callback (function, optional): A function to call for each\n module loaded. Prototype:\n def progress(loaded_modules: int, total_modules: int)\n \"\"\"\n\n # Load tokenizer\n self.tokenizer = ExLlamaV2Tokenizer(self.config)\n\n # Load draft model if a config is present\n if self.draft_config:\n self.draft_model = ExLlamaV2(self.draft_config)\n if not self.quiet:\n logger.info(\"Loading draft model: \" + self.draft_config.model_dir)\n\n self.draft_cache = ExLlamaV2Cache(self.draft_model, lazy=True)\n reserve = [AUTO_SPLIT_RESERVE_BYTES] + [0] * 16\n yield from self.draft_model.load_autosplit_gen(\n self.draft_cache,\n reserve_vram=reserve,\n last_id_only=True,\n callback_gen=progress_callback,\n )\n\n # Test VRAM allocation with a full-length forward pass\n input_ids = torch.zeros((1, self.config.max_input_len), dtype=torch.long)\n self.draft_model.forward(input_ids, cache=self.cache, preprocess_only=True)\n\n # Load model\n self.model = ExLlamaV2(self.config)\n if not self.quiet:\n logger.info(\"Loading model: \" + self.config.model_dir)\n\n if not self.gpu_split_auto:\n for value in self.model.load_gen(\n self.gpu_split, callback_gen=progress_callback\n ):\n if isinstance(value, str):\n yield value\n\n batch_size = 2 if self.use_cfg else 1\n if self.cache_fp8:\n self.cache = ExLlamaV2Cache_8bit(\n self.model, lazy=self.gpu_split_auto, batch_size=batch_size\n )\n else:\n self.cache = ExLlamaV2Cache(\n self.model, lazy=self.gpu_split_auto, batch_size=batch_size\n )\n\n if self.gpu_split_auto:\n reserve = [AUTO_SPLIT_RESERVE_BYTES] + [0] * 16\n yield from self.model.load_autosplit_gen(\n self.cache,\n reserve_vram=reserve,\n last_id_only=True,\n callback_gen=progress_callback,\n )\n\n # Test VRAM allocation with a full-length forward pass\n input_ids = torch.zeros((1, self.config.max_input_len), dtype=torch.long)\n self.model.forward(input_ids, cache=self.cache, preprocess_only=True)\n\n # Create generator\n self.generator = ExLlamaV2StreamingGenerator(\n self.model,\n self.cache,\n self.tokenizer,\n self.draft_model,\n self.draft_cache,\n )\n\n logger.info(\"Model successfully loaded.\")\n\n def unload(self, loras_only: bool = False):\n \"\"\"\n Free all VRAM resources used by this model\n \"\"\"\n\n for lora in self.active_loras:\n lora.unload()\n\n self.active_loras = []\n\n # Unload the entire model if not just unloading loras\n if not loras_only:\n if self.model:\n self.model.unload()\n self.model = None\n\n if self.draft_model:\n self.draft_model.unload()\n self.draft_model = None\n\n self.config = None\n self.cache = None\n self.tokenizer = None\n self.generator = None\n\n gc.collect()\n torch.cuda.empty_cache()\n\n def get_tokens(self, text: Optional[str], ids: Optional[List[int]], **kwargs):\n \"\"\"Common function for token operations\"\"\"\n if text:\n # Assume token encoding\n return self.tokenizer.encode(\n text,\n add_bos=unwrap(kwargs.get(\"add_bos_token\"), 
True),\n encode_special_tokens=unwrap(kwargs.get(\"encode_special_tokens\"), True),\n )\n if ids:\n # Assume token decoding\n ids = torch.tensor([ids])\n return self.tokenizer.decode(\n ids,\n decode_special_tokens=unwrap(kwargs.get(\"decode_special_tokens\"), True),\n )[0]\n\n return None\n\n def get_special_tokens(self, add_bos_token: bool, ban_eos_token: bool):\n return {\n \"bos_token\": self.tokenizer.bos_token if add_bos_token else \"\",\n \"eos_token\": self.tokenizer.eos_token if not ban_eos_token else \"\",\n \"pad_token\": self.tokenizer.pad_token,\n \"unk_token\": self.tokenizer.unk_token,\n }\n\n def check_unsupported_settings(self, **kwargs):\n # Warn of unsupported settings if the setting is enabled\n if (unwrap(kwargs.get(\"mirostat\"), False)) and not hasattr(\n ExLlamaV2Sampler.Settings, \"mirostat\"\n ):\n logger.warning(\n \"Mirostat sampling is not supported by the currently \"\n \"installed ExLlamaV2 version.\"\n )\n\n if (unwrap(kwargs.get(\"min_p\"), 0.0)) not in [0.0, 1.0] and not hasattr(\n ExLlamaV2Sampler.Settings, \"min_p\"\n ):\n logger.warning(\n \"Min-P sampling is not supported by the currently \"\n \"installed ExLlamaV2 version.\"\n )\n\n if (unwrap(kwargs.get(\"tfs\"), 0.0)) not in [0.0, 1.0] and not hasattr(\n ExLlamaV2Sampler.Settings, \"tfs\"\n ):\n logger.warning(\n \"Tail-free sampling (TFS) is not supported by the currently \"\n \"installed ExLlamaV2 version.\"\n )\n\n if (unwrap(kwargs.get(\"temperature_last\"), False)) and not hasattr(\n ExLlamaV2Sampler.Settings, \"temperature_last\"\n ):\n logger.warning(\n \"Temperature last is not supported by the currently \"\n \"installed ExLlamaV2 version.\"\n )\n\n if (unwrap(kwargs.get(\"top_a\"), False)) and not hasattr(\n ExLlamaV2Sampler.Settings, \"top_a\"\n ):\n logger.warning(\n \"Top-A is not supported by the currently \"\n \"installed ExLlamaV2 version.\"\n )\n\n if (unwrap(kwargs.get(\"presence_penalty\"), 0.0)) != 0.0 and not hasattr(\n ExLlamaV2Sampler.Settings, \"token_presence_penalty\"\n ):\n logger.warning(\n \"Presence penalty is not supported by the currently \"\n \"installed ExLlamaV2 version.\"\n )\n\n def generate(self, prompt: str, **kwargs):\n \"\"\"Generate a response to a prompt\"\"\"\n generation = list(self.generate_gen(prompt, **kwargs))\n if generation:\n response = \"\".join(map(lambda chunk: chunk[0], generation))\n return response, generation[-1][1], generation[-1][2]\n\n return \"\", 0, 0\n\n # pylint: disable=too-many-locals,too-many-branches,too-many-statements\n def generate_gen(self, prompt: str, **kwargs):\n \"\"\"\n Create generator function for prompt completion\n\n Args:\n prompt (str): Input prompt\n **kwargs:\n 'token_healing' (bool): Use token healing (default: False)\n 'temperature' (float): Sampling temperature (default: 1.0)\n 'temperature_last' (bool): Apply temperature after all other\n samplers (default: False)\n 'top_k' (int): Sampling top-K (default: 0)\n 'top_p' (float): Sampling top-P (default: 1.0)\n 'min_p' (float): Sampling min-P (default: 0.0)\n 'tfs' (float): Tail-free sampling (default: 0.0)\n 'typical' (float): Sampling typical (default: 0.0)\n 'mirostat' (bool): Use Mirostat (default: False)\n 'mirostat_tau' (float) Mirostat tau parameter (default: 1.5)\n 'mirostat_eta' (float) Mirostat eta parameter (default: 0.1)\n 'frequency_penalty' (float): Token frequency penalty (default: 0.0)\n 'presence_penalty' (float): Token presence penalty (default: 0.0)\n 'repetition_penalty' (float): Token repetition penalty\n (default: 1.15)\n 'penalty_range' 
(int): Penalty range\n (default: whole context)\n 'repetition_decay' (int): Repetition penalty range\n (default: same as range)\n 'stop' (List[Union[str, int]]): List of stop strings/tokens to\n end response (default: [EOS])\n 'max_tokens' (int): Max no. tokens in response (default: 150)\n 'add_bos_token' (bool): Adds the BOS token to the start of the\n prompt (default: True)\n 'ban_eos_token' (bool): Bans the EOS token from generation\n (default: False)\n 'logit_bias' (Dict[int, float]): Biases specific tokens to\n either show up more or less (default: None)\n 'stream_interval' (float): Interval in seconds between each\n output chunk (default: immediate)\n 'generate_window' (int): Space to reserve at the end of the\n model's context when generating. Rolls context window by\n the same amount if context length is exceeded to allow\n generating pastthe models max_seq_len.\n \"\"\"\n\n token_healing = unwrap(kwargs.get(\"token_healing\"), False)\n max_tokens = unwrap(kwargs.get(\"max_tokens\"), 150)\n stream_interval = unwrap(kwargs.get(\"stream_interval\"), 0)\n generate_window = min(unwrap(kwargs.get(\"generate_window\"), 512), max_tokens)\n\n # Sampler settings\n gen_settings = ExLlamaV2Sampler.Settings()\n\n self.check_unsupported_settings(**kwargs)\n\n # Apply settings\n gen_settings.temperature = unwrap(kwargs.get(\"temperature\"), 1.0)\n gen_settings.temperature_last = unwrap(kwargs.get(\"temperature_last\"), False)\n gen_settings.top_k = unwrap(kwargs.get(\"top_k\"), 0)\n gen_settings.top_p = unwrap(kwargs.get(\"top_p\"), 1.0)\n gen_settings.top_a = unwrap(kwargs.get(\"top_a\"), 0.0)\n gen_settings.min_p = unwrap(kwargs.get(\"min_p\"), 0.0)\n gen_settings.tfs = unwrap(kwargs.get(\"tfs\"), 1.0)\n gen_settings.typical = unwrap(kwargs.get(\"typical\"), 1.0)\n gen_settings.mirostat = unwrap(kwargs.get(\"mirostat\"), False)\n\n # Default tau and eta fallbacks don't matter if mirostat is off\n gen_settings.mirostat_tau = unwrap(kwargs.get(\"mirostat_tau\"), 1.5)\n gen_settings.mirostat_eta = unwrap(kwargs.get(\"mirostat_eta\"), 0.1)\n\n # Set CFG scale and negative prompt\n cfg_scale = unwrap(kwargs.get(\"cfg_scale\"), 1.0)\n negative_prompt = None\n if cfg_scale not in [None, 1.0]:\n if self.use_cfg:\n gen_settings.cfg_scale = cfg_scale\n\n # If the negative prompt is empty, use the BOS token\n negative_prompt = unwrap(\n kwargs.get(\"negative_prompt\"), self.tokenizer.bos_token\n )\n else:\n logger.warn(\n \"CFG is currently disabled. 
\"\n + \"Please reload your model with use_cfg = True.\",\n )\n\n gen_settings.token_presence_penalty = unwrap(\n kwargs.get(\"presence_penalty\"), 0.0\n )\n gen_settings.token_repetition_penalty = unwrap(\n kwargs.get(\"repetition_penalty\"), 1.0\n )\n\n # Applies for all penalties despite being called token_repetition_range\n gen_settings.token_repetition_range = unwrap(\n kwargs.get(\"penalty_range\"), self.config.max_seq_len\n )\n auto_scale_penalty_range = False\n\n frequency_penalty = unwrap(kwargs.get(\"frequency_penalty\"), 0.0)\n if hasattr(gen_settings, \"token_frequency_penalty\"):\n gen_settings.token_frequency_penalty = frequency_penalty\n\n # Dynamically scale penalty range to output tokens\n # Only do this if freq/pres pen is enabled\n # and the repetition range is -1\n auto_scale_penalty_range = (\n gen_settings.token_frequency_penalty != 0\n or gen_settings.token_presence_penalty != 0\n ) and gen_settings.token_repetition_range == -1\n elif frequency_penalty != 0.0:\n logger.warning(\n \"Frequency penalty is not supported by the currently \"\n \"installed ExLlamaV2 version.\"\n )\n\n # Override the repetition penalty value if it isn't set already\n # if the user is on an older exl2 version\n if unwrap(gen_settings.token_repetition_penalty, 1.0) == 1.0:\n gen_settings.token_repetition_penalty = frequency_penalty\n logger.warning(\"Setting this value to repetition penalty instead.\")\n\n # Always make sure the fallback is 0 if range < 0\n # It's technically fine to use -1, but this just validates the passed\n # fallback\n # Always default to 0 if something goes wrong\n if gen_settings.token_repetition_range < 0:\n fallback_decay = 0\n else:\n fallback_decay = gen_settings.token_repetition_range\n gen_settings.token_repetition_decay = coalesce(\n kwargs.get(\"repetition_decay\"), fallback_decay, 0\n )\n\n stop_conditions: List[Union[str, int]] = unwrap(kwargs.get(\"stop\"), [])\n add_bos_token = unwrap(kwargs.get(\"add_bos_token\"), True)\n ban_eos_token = unwrap(kwargs.get(\"ban_eos_token\"), False)\n logit_bias = kwargs.get(\"logit_bias\")\n\n # Override sampler settings for temp = 0\n if gen_settings.temperature == 0:\n gen_settings.temperature = 1.0\n gen_settings.top_k = 1\n gen_settings.top_p = 0\n gen_settings.typical = 0\n\n # Log generation options to console\n # Some options are too large, so log the args instead\n log_generation_params(\n max_tokens=max_tokens,\n **vars(gen_settings),\n token_healing=token_healing,\n auto_scale_penalty_range=auto_scale_penalty_range,\n add_bos_token=add_bos_token,\n ban_eos_token=ban_eos_token,\n stop_conditions=stop_conditions,\n logit_bias=logit_bias,\n )\n\n # Log prompt to console\n log_prompt(prompt, negative_prompt)\n\n # Set logit bias\n if logit_bias:\n # Create a vocab tensor if it doesn't exist for token biasing\n if gen_settings.token_bias is None:\n padding = -self.tokenizer.config.vocab_size % 32\n gen_settings.token_bias = torch.zeros(\n (self.tokenizer.config.vocab_size + padding,),\n dtype=torch.float,\n )\n\n # Map logits to the tensor with their biases\n for token, bias in logit_bias.items():\n gen_settings.token_bias[token] = bias\n\n # Ban the EOS token if specified. 
If not, append to stop conditions\n # as well.\n # Set this below logging to avoid polluting the stop strings array\n if ban_eos_token:\n gen_settings.disallow_tokens(self.tokenizer, [self.tokenizer.eos_token_id])\n else:\n stop_conditions.append(self.tokenizer.eos_token_id)\n\n # Stop conditions\n self.generator.set_stop_conditions(stop_conditions)\n\n # Tokenized context\n ids, offsets = self.tokenizer.encode(\n [prompt, negative_prompt]\n if negative_prompt and gen_settings.cfg_scale not in [None, 1.0]\n else prompt,\n add_bos=add_bos_token,\n encode_special_tokens=True,\n return_offsets=True,\n )\n mask = (\n self.tokenizer.padding_mask(ids)\n if self.use_cfg and gen_settings.cfg_scale not in [None, 1.0]\n else None\n )\n context_len = len(ids[0])\n\n if context_len > self.config.max_seq_len:\n logger.warning(\n f\"Context length {context_len} is greater than max_seq_len \"\n f\"{self.config.max_seq_len}. Generation is truncated and \"\n \"metrics may not be accurate.\"\n )\n\n prompt_tokens = ids.shape[-1]\n\n # Begin\n generated_tokens = 0\n full_response = \"\"\n start_time = time.time()\n last_chunk_time = start_time\n\n save_tokens = torch.empty((ids.shape[0], 0), dtype=torch.bool)\n chunk_buffer = \"\"\n chunk_tokens = 0\n\n while True:\n # Ingest prompt\n if chunk_tokens == 0:\n ids = torch.cat((ids, save_tokens), dim=-1)\n save_tokens = torch.empty((ids.shape[0], 0), dtype=torch.bool)\n overflow = ids.shape[-1] + generate_window - self.config.max_seq_len\n active_ids = ids[:, max(0, overflow) :]\n chunk_tokens = self.config.max_seq_len - active_ids.shape[-1]\n\n # Split for exllama versions that have CFG\n if self.use_cfg:\n self.generator.begin_stream(\n active_ids,\n gen_settings,\n token_healing=token_healing,\n loras=self.active_loras,\n input_mask=mask,\n position_offsets=offsets,\n )\n else:\n self.generator.begin_stream(\n active_ids,\n gen_settings,\n token_healing=token_healing,\n loras=self.active_loras,\n )\n\n # Reset offsets for subsequent passes if the context is truncated\n offsets = None\n\n if auto_scale_penalty_range:\n gen_settings.token_repetition_range = generated_tokens\n\n # Generate\n chunk, eos, tokens = self.generator.stream()\n\n if token_healing:\n # Extract healed token\n ids[:, -1] = self.generator.sequence_ids[:, -2]\n token_healing = False\n\n save_tokens = torch.cat(\n (save_tokens, tokens.expand(save_tokens.shape[0], -1)), dim=-1\n )\n chunk_buffer += chunk\n\n generated_tokens += 1\n chunk_tokens -= 1\n\n # Yield output\n now = time.time()\n elapsed = now - last_chunk_time\n\n if chunk_buffer != \"\" and (\n elapsed > stream_interval or eos or generated_tokens == max_tokens\n ):\n yield chunk_buffer, prompt_tokens, generated_tokens\n full_response += chunk_buffer\n chunk_buffer = \"\"\n last_chunk_time = now\n\n if eos or generated_tokens == max_tokens:\n break\n\n # Print response\n log_response(full_response)\n\n elapsed_time = last_chunk_time - start_time\n\n initial_response = (\n f\"Metrics: {generated_tokens} tokens generated in \"\n f\"{round(elapsed_time, 2)} seconds\"\n )\n itemization = []\n extra_parts = []\n\n # Add tokens per second\n tokens_per_second = (\n \"Indeterminate\"\n if elapsed_time == 0\n else round(generated_tokens / elapsed_time, 2)\n )\n itemization.append(f\"{tokens_per_second} T/s\")\n\n # Add context (original token count)\n if ids is not None:\n itemization.append(f\"context {context_len} tokens\")\n\n if context_len > self.config.max_seq_len:\n extra_parts.append(\"<-- Not accurate (truncated)\")\n\n # Print 
output\n logger.info(\n initial_response\n + \" (\"\n + \", \".join(itemization)\n + \") \"\n + \" \".join(extra_parts)\n )" }, { "identifier": "CompletionRequest", "path": "OAI/types/completion.py", "snippet": "class CompletionRequest(CommonCompletionRequest):\n \"\"\"Represents a completion request.\"\"\"\n\n # Prompt can also contain token ids, but that's out of scope\n # for this project.\n prompt: Union[str, List[str]]" }, { "identifier": "ChatCompletionRequest", "path": "OAI/types/chat_completion.py", "snippet": "class ChatCompletionRequest(CommonCompletionRequest):\n # Messages\n # Take in a string as well even though it's not part of the OAI spec\n messages: Union[str, List[Dict[str, str]]]\n prompt_template: Optional[str] = None\n add_generation_prompt: Optional[bool] = True" }, { "identifier": "LoraCard", "path": "OAI/types/lora.py", "snippet": "class LoraCard(BaseModel):\n \"\"\"Represents a single Lora card.\"\"\"\n\n id: str = \"test\"\n object: str = \"lora\"\n created: int = Field(default_factory=lambda: int(time()))\n owned_by: str = \"tabbyAPI\"\n scaling: Optional[float] = None" }, { "identifier": "LoraList", "path": "OAI/types/lora.py", "snippet": "class LoraList(BaseModel):\n \"\"\"Represents a list of Lora cards.\"\"\"\n\n object: str = \"list\"\n data: List[LoraCard] = Field(default_factory=list)" }, { "identifier": "LoraLoadRequest", "path": "OAI/types/lora.py", "snippet": "class LoraLoadRequest(BaseModel):\n \"\"\"Represents a Lora load request.\"\"\"\n\n loras: List[LoraLoadInfo]" }, { "identifier": "LoraLoadResponse", "path": "OAI/types/lora.py", "snippet": "class LoraLoadResponse(BaseModel):\n \"\"\"Represents a Lora load response.\"\"\"\n\n success: List[str] = Field(default_factory=list)\n failure: List[str] = Field(default_factory=list)" }, { "identifier": "ModelCard", "path": "OAI/types/model.py", "snippet": "class ModelCard(BaseModel):\n \"\"\"Represents a single model card.\"\"\"\n\n id: str = \"test\"\n object: str = \"model\"\n created: int = Field(default_factory=lambda: int(time()))\n owned_by: str = \"tabbyAPI\"\n logging: Optional[LogPreferences] = None\n parameters: Optional[ModelCardParameters] = None" }, { "identifier": "ModelLoadRequest", "path": "OAI/types/model.py", "snippet": "class ModelLoadRequest(BaseModel):\n \"\"\"Represents a model load request.\"\"\"\n\n name: str\n\n # Max seq len is fetched from config.json of the model by default\n max_seq_len: Optional[int] = Field(\n description=\"Leave this blank to use the model's base sequence length\",\n default=None,\n examples=[4096],\n )\n override_base_seq_len: Optional[int] = Field(\n description=(\n \"Overrides the model's base sequence length. 
\" \"Leave blank if unsure\"\n ),\n default=None,\n examples=[4096],\n )\n gpu_split_auto: Optional[bool] = True\n gpu_split: Optional[List[float]] = Field(\n default_factory=list, examples=[[24.0, 20.0]]\n )\n rope_scale: Optional[float] = Field(\n description=\"Automatically pulled from the model's config if not present\",\n default=None,\n examples=[1.0],\n )\n rope_alpha: Optional[float] = Field(\n description=\"Automatically calculated if not present\",\n default=None,\n examples=[1.0],\n )\n no_flash_attention: Optional[bool] = False\n # low_mem: Optional[bool] = False\n cache_mode: Optional[str] = \"FP16\"\n prompt_template: Optional[str] = None\n num_experts_per_token: Optional[int] = None\n use_cfg: Optional[bool] = None\n draft: Optional[DraftModelLoadRequest] = None" }, { "identifier": "ModelLoadResponse", "path": "OAI/types/model.py", "snippet": "class ModelLoadResponse(BaseModel):\n \"\"\"Represents a model load response.\"\"\"\n\n # Avoids pydantic namespace warning\n model_config = ConfigDict(protected_namespaces=[])\n\n model_type: str = \"model\"\n module: int\n modules: int\n status: str" }, { "identifier": "ModelCardParameters", "path": "OAI/types/model.py", "snippet": "class ModelCardParameters(BaseModel):\n \"\"\"Represents model card parameters.\"\"\"\n\n # Safe to do this since it's guaranteed to fetch a max seq len\n # from model_container\n max_seq_len: Optional[int] = None\n rope_scale: Optional[float] = 1.0\n rope_alpha: Optional[float] = 1.0\n cache_mode: Optional[str] = \"FP16\"\n prompt_template: Optional[str] = None\n num_experts_per_token: Optional[int] = None\n use_cfg: Optional[bool] = None\n draft: Optional[\"ModelCard\"] = None" }, { "identifier": "TemplateList", "path": "OAI/types/template.py", "snippet": "class TemplateList(BaseModel):\n \"\"\"Represents a list of templates.\"\"\"\n\n object: str = \"list\"\n data: List[str] = Field(default_factory=list)" }, { "identifier": "TokenEncodeRequest", "path": "OAI/types/token.py", "snippet": "class TokenEncodeRequest(CommonTokenRequest):\n \"\"\"Represents a tokenization request.\"\"\"\n\n text: str" }, { "identifier": "TokenEncodeResponse", "path": "OAI/types/token.py", "snippet": "class TokenEncodeResponse(BaseModel):\n \"\"\"Represents a tokenization response.\"\"\"\n\n tokens: List[int]\n length: int" }, { "identifier": "TokenDecodeRequest", "path": "OAI/types/token.py", "snippet": "class TokenDecodeRequest(CommonTokenRequest):\n \"\"\" \" Represents a detokenization request.\"\"\"\n\n tokens: List[int]" }, { "identifier": "TokenDecodeResponse", "path": "OAI/types/token.py", "snippet": "class TokenDecodeResponse(BaseModel):\n \"\"\"Represents a detokenization response.\"\"\"\n\n text: str" }, { "identifier": "create_completion_response", "path": "OAI/utils_oai.py", "snippet": "def create_completion_response(\n text: str,\n prompt_tokens: int,\n completion_tokens: int,\n model_name: Optional[str],\n):\n \"\"\"Create a completion response from the provided text.\"\"\"\n choice = CompletionRespChoice(finish_reason=\"Generated\", text=text)\n\n response = CompletionResponse(\n choices=[choice],\n model=unwrap(model_name, \"\"),\n usage=UsageStats(\n prompt_tokens=prompt_tokens,\n completion_tokens=completion_tokens,\n total_tokens=prompt_tokens + completion_tokens,\n ),\n )\n\n return response" }, { "identifier": "get_model_list", "path": "OAI/utils_oai.py", "snippet": "def get_model_list(model_path: pathlib.Path, draft_model_path: Optional[str] = None):\n \"\"\"Get the list of models from the provided 
path.\"\"\"\n\n # Convert the provided draft model path to a pathlib path for\n # equality comparisons\n if draft_model_path:\n draft_model_path = pathlib.Path(draft_model_path).resolve()\n\n model_card_list = ModelList()\n for path in model_path.iterdir():\n # Don't include the draft models path\n if path.is_dir() and path != draft_model_path:\n model_card = ModelCard(id=path.name)\n model_card_list.data.append(model_card) # pylint: disable=no-member\n\n return model_card_list" }, { "identifier": "get_lora_list", "path": "OAI/utils_oai.py", "snippet": "def get_lora_list(lora_path: pathlib.Path):\n \"\"\"Get the list of Lora cards from the provided path.\"\"\"\n lora_list = LoraList()\n for path in lora_path.iterdir():\n if path.is_dir():\n lora_card = LoraCard(id=path.name)\n lora_list.data.append(lora_card) # pylint: disable=no-member\n\n return lora_list" }, { "identifier": "create_chat_completion_response", "path": "OAI/utils_oai.py", "snippet": "def create_chat_completion_response(\n text: str,\n prompt_tokens: int,\n completion_tokens: int,\n model_name: Optional[str],\n):\n \"\"\"Create a chat completion response from the provided text.\"\"\"\n message = ChatCompletionMessage(role=\"assistant\", content=text)\n\n choice = ChatCompletionRespChoice(finish_reason=\"Generated\", message=message)\n\n response = ChatCompletionResponse(\n choices=[choice],\n model=unwrap(model_name, \"\"),\n usage=UsageStats(\n prompt_tokens=prompt_tokens,\n completion_tokens=completion_tokens,\n total_tokens=prompt_tokens + completion_tokens,\n ),\n )\n\n return response" }, { "identifier": "create_chat_completion_stream_chunk", "path": "OAI/utils_oai.py", "snippet": "def create_chat_completion_stream_chunk(\n const_id: str,\n text: Optional[str] = None,\n model_name: Optional[str] = None,\n finish_reason: Optional[str] = None,\n):\n \"\"\"Create a chat completion stream chunk from the provided text.\"\"\"\n if finish_reason:\n message = {}\n else:\n message = ChatCompletionMessage(role=\"assistant\", content=text)\n\n # The finish reason can be None\n choice = ChatCompletionStreamChoice(finish_reason=finish_reason, delta=message)\n\n chunk = ChatCompletionStreamChunk(\n id=const_id, choices=[choice], model=unwrap(model_name, \"\")\n )\n\n return chunk" }, { "identifier": "get_all_templates", "path": "templating.py", "snippet": "def get_all_templates():\n \"\"\"Fetches all templates from the templates directory\"\"\"\n\n template_directory = pathlib.Path(\"templates\")\n return template_directory.glob(\"*.jinja\")" }, { "identifier": "get_prompt_from_template", "path": "templating.py", "snippet": "def get_prompt_from_template(\n messages,\n prompt_template: PromptTemplate,\n add_generation_prompt: bool,\n special_tokens: Optional[Dict[str, str]] = None,\n):\n \"\"\"Get a prompt from a template and a list of messages.\"\"\"\n if version.parse(package_version(\"jinja2\")) < version.parse(\"3.0.0\"):\n raise ImportError(\n \"Parsing these chat completion messages requires jinja2 3.0.0 \"\n f\"or greater. 
Current version: {package_version('jinja2')}\\n\"\n \"Please upgrade jinja by running the following command: \"\n \"pip install --upgrade jinja2\"\n )\n\n compiled_template = _compile_template(prompt_template.template)\n return compiled_template.render(\n messages=messages,\n add_generation_prompt=add_generation_prompt,\n **special_tokens,\n )" }, { "identifier": "get_generator_error", "path": "utils.py", "snippet": "def get_generator_error(message: str):\n \"\"\"Get a generator error.\"\"\"\n error_message = TabbyGeneratorErrorMessage(\n message=message, trace=traceback.format_exc()\n )\n\n generator_error = TabbyGeneratorError(error=error_message)\n\n # Log and send the exception\n logger.error(generator_error.error.message)\n return get_sse_packet(generator_error.model_dump_json())" }, { "identifier": "get_sse_packet", "path": "utils.py", "snippet": "def get_sse_packet(json_data: str):\n \"\"\"Get an SSE packet.\"\"\"\n return f\"data: {json_data}\\n\\n\"" }, { "identifier": "load_progress", "path": "utils.py", "snippet": "def load_progress(module, modules):\n \"\"\"Wrapper callback for load progress.\"\"\"\n yield module, modules" }, { "identifier": "unwrap", "path": "utils.py", "snippet": "def unwrap(wrapped, default=None):\n \"\"\"Unwrap function for Optionals.\"\"\"\n if wrapped is None:\n return default\n\n return wrapped" }, { "identifier": "init_logger", "path": "logger.py", "snippet": "def init_logger(name: str):\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n logger.addHandler(_default_handler)\n logger.propagate = False\n return logger" } ]
import pathlib import uvicorn import gen_logging from asyncio import CancelledError from typing import Optional from uuid import uuid4 from jinja2 import TemplateError from fastapi import FastAPI, Depends, HTTPException, Request from fastapi.middleware.cors import CORSMiddleware from fastapi.responses import StreamingResponse from functools import partial from progress.bar import IncrementalBar from args import convert_args_to_dict, init_argparser from auth import check_admin_key, check_api_key, load_auth_keys from config import ( override_config_from_args, read_config_from_file, get_gen_logging_config, get_model_config, get_draft_model_config, get_lora_config, get_network_config, ) from generators import call_with_semaphore, generate_with_semaphore from model import ModelContainer from OAI.types.completion import CompletionRequest from OAI.types.chat_completion import ChatCompletionRequest from OAI.types.lora import LoraCard, LoraList, LoraLoadRequest, LoraLoadResponse from OAI.types.model import ( ModelCard, ModelLoadRequest, ModelLoadResponse, ModelCardParameters, ) from OAI.types.template import TemplateList from OAI.types.token import ( TokenEncodeRequest, TokenEncodeResponse, TokenDecodeRequest, TokenDecodeResponse, ) from OAI.utils_oai import ( create_completion_response, get_model_list, get_lora_list, create_chat_completion_response, create_chat_completion_stream_chunk, ) from templating import get_all_templates, get_prompt_from_template from utils import get_generator_error, get_sse_packet, load_progress, unwrap from logger import init_logger
14,216
success=unwrap(result.get("success"), []), failure=unwrap(result.get("failure"), []), ) # Unload lora endpoint @app.post( "/v1/lora/unload", dependencies=[Depends(check_admin_key), Depends(_check_model_container)], ) async def unload_loras(): """Unloads the currently loaded loras.""" MODEL_CONTAINER.unload(True) # Encode tokens endpoint @app.post( "/v1/token/encode", dependencies=[Depends(check_api_key), Depends(_check_model_container)], ) async def encode_tokens(data: TokenEncodeRequest): """Encodes a string into tokens.""" raw_tokens = MODEL_CONTAINER.get_tokens(data.text, None, **data.get_params()) # Have to use this if check otherwise Torch's tensors error out # with a boolean issue tokens = raw_tokens[0].tolist() if raw_tokens is not None else [] response = TokenEncodeResponse(tokens=tokens, length=len(tokens)) return response # Decode tokens endpoint @app.post( "/v1/token/decode", dependencies=[Depends(check_api_key), Depends(_check_model_container)], ) async def decode_tokens(data: TokenDecodeRequest): """Decodes tokens into a string.""" message = MODEL_CONTAINER.get_tokens(None, data.tokens, **data.get_params()) response = TokenDecodeResponse(text=unwrap(message, "")) return response # Completions endpoint @app.post( "/v1/completions", dependencies=[Depends(check_api_key), Depends(_check_model_container)], ) async def generate_completion(request: Request, data: CompletionRequest): """Generates a completion from a prompt.""" model_path = MODEL_CONTAINER.get_model_path() if isinstance(data.prompt, list): data.prompt = "\n".join(data.prompt) if data.stream: async def generator(): """Generator for the generation process.""" try: new_generation = MODEL_CONTAINER.generate_gen( data.prompt, **data.to_gen_params() ) for part, prompt_tokens, completion_tokens in new_generation: if await request.is_disconnected(): break response = create_completion_response( part, prompt_tokens, completion_tokens, model_path.name ) yield get_sse_packet(response.model_dump_json()) # Yield a finish response on successful generation yield get_sse_packet("[DONE]") except CancelledError: logger.error("Completion request cancelled by user.") except Exception as exc: yield get_generator_error(str(exc)) return StreamingResponse( generate_with_semaphore(generator), media_type="text/event-stream" ) response_text, prompt_tokens, completion_tokens = await call_with_semaphore( partial(MODEL_CONTAINER.generate, data.prompt, **data.to_gen_params()) ) response = create_completion_response( response_text, prompt_tokens, completion_tokens, model_path.name ) return response # Chat completions endpoint @app.post( "/v1/chat/completions", dependencies=[Depends(check_api_key), Depends(_check_model_container)], ) async def generate_chat_completion(request: Request, data: ChatCompletionRequest): """Generates a chat completion from a prompt.""" if MODEL_CONTAINER.prompt_template is None: raise HTTPException( 422, "This endpoint is disabled because a prompt template is not set.", ) model_path = MODEL_CONTAINER.get_model_path() if isinstance(data.messages, str): prompt = data.messages else: try: special_tokens_dict = MODEL_CONTAINER.get_special_tokens( unwrap(data.add_bos_token, True), unwrap(data.ban_eos_token, False), )
"""The main tabbyAPI module. Contains the FastAPI server and endpoints.""" logger = init_logger(__name__) app = FastAPI( title="TabbyAPI", summary="An OAI compatible exllamav2 API that's both lightweight and fast", description=( "This docs page is not meant to send requests! Please use a service " "like Postman or a frontend UI." ), ) # Globally scoped variables. Undefined until initalized in main MODEL_CONTAINER: Optional[ModelContainer] = None def _check_model_container(): if MODEL_CONTAINER is None or MODEL_CONTAINER.model is None: raise HTTPException(400, "No models are loaded.") # ALlow CORS requests app.add_middleware( CORSMiddleware, allow_origins=["*"], allow_credentials=True, allow_methods=["*"], allow_headers=["*"], ) # Model list endpoint @app.get("/v1/models", dependencies=[Depends(check_api_key)]) @app.get("/v1/model/list", dependencies=[Depends(check_api_key)]) async def list_models(): """Lists all models in the model directory.""" model_config = get_model_config() model_dir = unwrap(model_config.get("model_dir"), "models") model_path = pathlib.Path(model_dir) draft_model_dir = get_draft_model_config().get("draft_model_dir") models = get_model_list(model_path.resolve(), draft_model_dir) if unwrap(model_config.get("use_dummy_models"), False): models.data.insert(0, ModelCard(id="gpt-3.5-turbo")) return models # Currently loaded model endpoint @app.get( "/v1/model", dependencies=[Depends(check_api_key), Depends(_check_model_container)], ) @app.get( "/v1/internal/model/info", dependencies=[Depends(check_api_key), Depends(_check_model_container)], ) async def get_current_model(): """Returns the currently loaded model.""" model_name = MODEL_CONTAINER.get_model_path().name prompt_template = MODEL_CONTAINER.prompt_template model_card = ModelCard( id=model_name, parameters=ModelCardParameters( rope_scale=MODEL_CONTAINER.config.scale_pos_emb, rope_alpha=MODEL_CONTAINER.config.scale_alpha_value, max_seq_len=MODEL_CONTAINER.config.max_seq_len, cache_mode="FP8" if MODEL_CONTAINER.cache_fp8 else "FP16", prompt_template=prompt_template.name if prompt_template else None, num_experts_per_token=MODEL_CONTAINER.config.num_experts_per_token, use_cfg=MODEL_CONTAINER.use_cfg, ), logging=gen_logging.PREFERENCES, ) if MODEL_CONTAINER.draft_config: draft_card = ModelCard( id=MODEL_CONTAINER.get_model_path(True).name, parameters=ModelCardParameters( rope_scale=MODEL_CONTAINER.draft_config.scale_pos_emb, rope_alpha=MODEL_CONTAINER.draft_config.scale_alpha_value, max_seq_len=MODEL_CONTAINER.draft_config.max_seq_len, ), ) model_card.parameters.draft = draft_card return model_card @app.get("/v1/model/draft/list", dependencies=[Depends(check_api_key)]) async def list_draft_models(): """Lists all draft models in the model directory.""" draft_model_dir = unwrap(get_draft_model_config().get("draft_model_dir"), "models") draft_model_path = pathlib.Path(draft_model_dir) models = get_model_list(draft_model_path.resolve()) return models # Load model endpoint @app.post("/v1/model/load", dependencies=[Depends(check_admin_key)]) async def load_model(request: Request, data: ModelLoadRequest): """Loads a model into the model container.""" global MODEL_CONTAINER if MODEL_CONTAINER and MODEL_CONTAINER.model: raise HTTPException(400, "A model is already loaded! 
Please unload it first.") if not data.name: raise HTTPException(400, "model_name not found.") model_path = pathlib.Path(unwrap(get_model_config().get("model_dir"), "models")) model_path = model_path / data.name load_data = data.model_dump() if data.draft: if not data.draft.draft_model_name: raise HTTPException( 400, "draft_model_name was not found inside the draft object." ) load_data["draft"]["draft_model_dir"] = unwrap( get_draft_model_config().get("draft_model_dir"), "models" ) if not model_path.exists(): raise HTTPException(400, "model_path does not exist. Check model_name?") MODEL_CONTAINER = ModelContainer(model_path.resolve(), False, **load_data) async def generator(): """Generator for the loading process.""" model_type = "draft" if MODEL_CONTAINER.draft_config else "model" load_status = MODEL_CONTAINER.load_gen(load_progress) try: for module, modules in load_status: if await request.is_disconnected(): break if module == 0: loading_bar: IncrementalBar = IncrementalBar("Modules", max=modules) elif module == modules: loading_bar.next() loading_bar.finish() response = ModelLoadResponse( model_type=model_type, module=module, modules=modules, status="finished", ) yield get_sse_packet(response.model_dump_json()) # Switch to model progress if the draft model is loaded if MODEL_CONTAINER.draft_config: model_type = "model" else: loading_bar.next() response = ModelLoadResponse( model_type=model_type, module=module, modules=modules, status="processing", ) yield get_sse_packet(response.model_dump_json()) except CancelledError: logger.error( "Model load cancelled by user. " "Please make sure to run unload to free up resources." ) except Exception as exc: yield get_generator_error(str(exc)) return StreamingResponse(generator(), media_type="text/event-stream") # Unload model endpoint @app.post( "/v1/model/unload", dependencies=[Depends(check_admin_key), Depends(_check_model_container)], ) async def unload_model(): """Unloads the currently loaded model.""" global MODEL_CONTAINER MODEL_CONTAINER.unload() MODEL_CONTAINER = None @app.get("/v1/templates", dependencies=[Depends(check_api_key)]) @app.get("/v1/template/list", dependencies=[Depends(check_api_key)]) async def get_templates(): templates = get_all_templates() template_strings = list(map(lambda template: template.stem, templates)) return TemplateList(data=template_strings) # Lora list endpoint @app.get("/v1/loras", dependencies=[Depends(check_api_key)]) @app.get("/v1/lora/list", dependencies=[Depends(check_api_key)]) async def get_all_loras(): """Lists all LoRAs in the lora directory.""" lora_path = pathlib.Path(unwrap(get_lora_config().get("lora_dir"), "loras")) loras = get_lora_list(lora_path.resolve()) return loras # Currently loaded loras endpoint @app.get( "/v1/lora", dependencies=[Depends(check_api_key), Depends(_check_model_container)], ) async def get_active_loras(): """Returns the currently loaded loras.""" active_loras = LoraList( data=list( map( lambda lora: LoraCard( id=pathlib.Path(lora.lora_path).parent.name, scaling=lora.lora_scaling * lora.lora_r / lora.lora_alpha, ), MODEL_CONTAINER.active_loras, ) ) ) return active_loras # Load lora endpoint @app.post( "/v1/lora/load", dependencies=[Depends(check_admin_key), Depends(_check_model_container)], ) async def load_lora(data: LoraLoadRequest): """Loads a LoRA into the model container.""" if not data.loras: raise HTTPException(400, "List of loras to load is not found.") lora_dir = pathlib.Path(unwrap(get_lora_config().get("lora_dir"), "loras")) if not lora_dir.exists(): raise 
HTTPException( 400, "A parent lora directory does not exist. Check your config.yml?", ) # Clean-up existing loras if present if len(MODEL_CONTAINER.active_loras) > 0: MODEL_CONTAINER.unload(True) result = MODEL_CONTAINER.load_loras(lora_dir, **data.model_dump()) return LoraLoadResponse( success=unwrap(result.get("success"), []), failure=unwrap(result.get("failure"), []), ) # Unload lora endpoint @app.post( "/v1/lora/unload", dependencies=[Depends(check_admin_key), Depends(_check_model_container)], ) async def unload_loras(): """Unloads the currently loaded loras.""" MODEL_CONTAINER.unload(True) # Encode tokens endpoint @app.post( "/v1/token/encode", dependencies=[Depends(check_api_key), Depends(_check_model_container)], ) async def encode_tokens(data: TokenEncodeRequest): """Encodes a string into tokens.""" raw_tokens = MODEL_CONTAINER.get_tokens(data.text, None, **data.get_params()) # Have to use this if check otherwise Torch's tensors error out # with a boolean issue tokens = raw_tokens[0].tolist() if raw_tokens is not None else [] response = TokenEncodeResponse(tokens=tokens, length=len(tokens)) return response # Decode tokens endpoint @app.post( "/v1/token/decode", dependencies=[Depends(check_api_key), Depends(_check_model_container)], ) async def decode_tokens(data: TokenDecodeRequest): """Decodes tokens into a string.""" message = MODEL_CONTAINER.get_tokens(None, data.tokens, **data.get_params()) response = TokenDecodeResponse(text=unwrap(message, "")) return response # Completions endpoint @app.post( "/v1/completions", dependencies=[Depends(check_api_key), Depends(_check_model_container)], ) async def generate_completion(request: Request, data: CompletionRequest): """Generates a completion from a prompt.""" model_path = MODEL_CONTAINER.get_model_path() if isinstance(data.prompt, list): data.prompt = "\n".join(data.prompt) if data.stream: async def generator(): """Generator for the generation process.""" try: new_generation = MODEL_CONTAINER.generate_gen( data.prompt, **data.to_gen_params() ) for part, prompt_tokens, completion_tokens in new_generation: if await request.is_disconnected(): break response = create_completion_response( part, prompt_tokens, completion_tokens, model_path.name ) yield get_sse_packet(response.model_dump_json()) # Yield a finish response on successful generation yield get_sse_packet("[DONE]") except CancelledError: logger.error("Completion request cancelled by user.") except Exception as exc: yield get_generator_error(str(exc)) return StreamingResponse( generate_with_semaphore(generator), media_type="text/event-stream" ) response_text, prompt_tokens, completion_tokens = await call_with_semaphore( partial(MODEL_CONTAINER.generate, data.prompt, **data.to_gen_params()) ) response = create_completion_response( response_text, prompt_tokens, completion_tokens, model_path.name ) return response # Chat completions endpoint @app.post( "/v1/chat/completions", dependencies=[Depends(check_api_key), Depends(_check_model_container)], ) async def generate_chat_completion(request: Request, data: ChatCompletionRequest): """Generates a chat completion from a prompt.""" if MODEL_CONTAINER.prompt_template is None: raise HTTPException( 422, "This endpoint is disabled because a prompt template is not set.", ) model_path = MODEL_CONTAINER.get_model_path() if isinstance(data.messages, str): prompt = data.messages else: try: special_tokens_dict = MODEL_CONTAINER.get_special_tokens( unwrap(data.add_bos_token, True), unwrap(data.ban_eos_token, False), )
prompt = get_prompt_from_template(
36
2023-11-10 05:54:02+00:00
16k
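The first record ends with the fields above (next_line, gold_snippet_index, created_at, level); the ShipBit/wingman-ai record begins below. As a hedged illustration only, the following minimal Python sketch shows one way a row like this could be consumed for next-line prediction: it assembles a prompt from the context snippets, the import_statement, and the cropped_code prefix, then compares a model's first predicted line against next_line. The `row` dictionary layout mirrors the field names shown in this dump, but the `predict` callback and the exact-match metric are assumptions introduced for illustration, not part of the dataset definition.

# Minimal sketch. Assumptions: `row` is one parsed record of this dataset as a
# dict, and `predict` is any callable mapping a prompt string to generated code;
# neither is defined by the dataset itself.
from typing import Any, Callable, Dict


def build_prompt(row: Dict[str, Any]) -> str:
    """Concatenate the retrieved context snippets, the import block, and the
    in-file prefix (cropped_code) into a single prompt string."""
    context_block = "\n\n".join(
        f"# {item['identifier']} ({item['path']})\n{item['snippet']}"
        for item in row.get("context", [])
    )
    parts = (context_block, row.get("import_statement", ""), row.get("cropped_code", ""))
    return "\n\n".join(part for part in parts if part)


def exact_match(row: Dict[str, Any], predict: Callable[[str], str]) -> bool:
    """Return True if the model's first predicted line equals next_line
    (compared with surrounding whitespace stripped)."""
    generated = predict(build_prompt(row)).splitlines()
    first_line = generated[0] if generated else ""
    return first_line.strip() == row["next_line"].strip()

Under these assumptions, scoring a split reduces to averaging exact_match over its rows, optionally grouped by the level field (e.g. "16k").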
ShipBit/wingman-ai
services/tower.py
[ { "identifier": "MissingApiKeyException", "path": "exceptions.py", "snippet": "class MissingApiKeyException(Exception):\n pass" }, { "identifier": "OpenAiWingman", "path": "wingmen/open_ai_wingman.py", "snippet": "class OpenAiWingman(Wingman):\n \"\"\"Our OpenAI Wingman base gives you everything you need to interact with OpenAI's various APIs.\n\n It transcribes speech to text using Whisper, uses the Completion API for conversation and implements the Tools API to execute functions.\n \"\"\"\n\n def __init__(\n self,\n name: str,\n config: dict[str, any],\n secret_keeper: SecretKeeper,\n app_root_dir: str,\n ):\n super().__init__(\n name=name,\n config=config,\n secret_keeper=secret_keeper,\n app_root_dir=app_root_dir,\n )\n\n self.openai: OpenAi = None # validate will set this\n \"\"\"Our OpenAI API wrapper\"\"\"\n\n # every conversation starts with the \"context\" that the user has configured\n self.messages = [\n {\"role\": \"system\", \"content\": self.config[\"openai\"].get(\"context\")}\n ]\n \"\"\"The conversation history that is used for the GPT calls\"\"\"\n\n self.edge_tts = EdgeTTS(app_root_dir)\n self.last_transcript_locale = None\n self.elevenlabs_api_key = None\n self.azure_keys = {\n \"tts\": None,\n \"whisper\": None,\n \"conversation\": None,\n \"summarize\": None,\n }\n self.stt_provider = self.config[\"features\"].get(\"stt_provider\", None)\n self.conversation_provider = self.config[\"features\"].get(\n \"conversation_provider\", None\n )\n self.summarize_provider = self.config[\"features\"].get(\n \"summarize_provider\", None\n )\n\n def validate(self):\n errors = super().validate()\n openai_api_key = self.secret_keeper.retrieve(\n requester=self.name,\n key=\"openai\",\n friendly_key_name=\"OpenAI API key\",\n prompt_if_missing=True,\n )\n if not openai_api_key:\n errors.append(\n \"Missing 'openai' API key. Please provide a valid key in the settings.\"\n )\n else:\n openai_organization = self.config[\"openai\"].get(\"organization\")\n openai_base_url = self.config[\"openai\"].get(\"base_url\")\n self.openai = OpenAi(openai_api_key, openai_organization, openai_base_url)\n\n self.__validate_elevenlabs_config(errors)\n\n self.__validate_azure_config(errors)\n\n return errors\n\n def __validate_elevenlabs_config(self, errors):\n if self.tts_provider == \"elevenlabs\":\n self.elevenlabs_api_key = self.secret_keeper.retrieve(\n requester=self.name,\n key=\"elevenlabs\",\n friendly_key_name=\"Elevenlabs API key\",\n prompt_if_missing=True,\n )\n if not self.elevenlabs_api_key:\n errors.append(\n \"Missing 'elevenlabs' API key. Please provide a valid key in the settings or use another tts_provider.\"\n )\n return\n elevenlabs_settings = self.config.get(\"elevenlabs\")\n if not elevenlabs_settings:\n errors.append(\n \"Missing 'elevenlabs' section in config. Please provide a valid config or change the TTS provider.\"\n )\n return\n if not elevenlabs_settings.get(\"model\"):\n errors.append(\"Missing 'model' setting in 'elevenlabs' config.\")\n return\n voice_settings = elevenlabs_settings.get(\"voice\")\n if not voice_settings:\n errors.append(\n \"Missing 'voice' section in 'elevenlabs' config. Please provide a voice configuration as shown in our example config.\"\n )\n return\n if not voice_settings.get(\"id\") and not voice_settings.get(\"name\"):\n errors.append(\n \"Missing 'id' or 'name' in 'voice' section of 'elevenlabs' config. 
Please provide a valid name or id for the voice in your config.\"\n )\n\n def __validate_azure_config(self, errors):\n if (\n self.tts_provider == \"azure\"\n or self.stt_provider == \"azure\"\n or self.conversation_provider == \"azure\"\n or self.summarize_provider == \"azure\"\n ):\n azure_settings = self.config.get(\"azure\")\n if not azure_settings:\n errors.append(\n \"Missing 'azure' section in config. Please provide a valid config.\"\n )\n return\n\n if self.tts_provider == \"azure\":\n self.azure_keys[\"tts\"] = self.secret_keeper.retrieve(\n requester=self.name,\n key=\"azure_tts\",\n friendly_key_name=\"Azure TTS API key\",\n prompt_if_missing=True,\n )\n if not self.azure_keys[\"tts\"]:\n errors.append(\n \"Missing 'azure' tts API key. Please provide a valid key in the settings.\"\n )\n return\n\n if self.stt_provider == \"azure\":\n self.azure_keys[\"whisper\"] = self.secret_keeper.retrieve(\n requester=self.name,\n key=\"azure_whisper\",\n friendly_key_name=\"Azure Whisper API key\",\n prompt_if_missing=True,\n )\n if not self.azure_keys[\"whisper\"]:\n errors.append(\n \"Missing 'azure' whisper API key. Please provide a valid key in the settings.\"\n )\n return\n\n if self.conversation_provider == \"azure\":\n self.azure_keys[\"conversation\"] = self.secret_keeper.retrieve(\n requester=self.name,\n key=\"azure_conversation\",\n friendly_key_name=\"Azure Conversation API key\",\n prompt_if_missing=True,\n )\n if not self.azure_keys[\"conversation\"]:\n errors.append(\n \"Missing 'azure' conversation API key. Please provide a valid key in the settings.\"\n )\n return\n\n if self.summarize_provider == \"azure\":\n self.azure_keys[\"summarize\"] = self.secret_keeper.retrieve(\n requester=self.name,\n key=\"azure_summarize\",\n friendly_key_name=\"Azure Summarize API key\",\n prompt_if_missing=True,\n )\n if not self.azure_keys[\"summarize\"]:\n errors.append(\n \"Missing 'azure' summarize API key. Please provide a valid key in the settings.\"\n )\n return\n\n async def _transcribe(self, audio_input_wav: str) -> tuple[str | None, str | None]:\n \"\"\"Transcribes the recorded audio to text using the OpenAI Whisper API.\n\n Args:\n audio_input_wav (str): The path to the audio file that contains the user's speech. 
This is a recording of what you you said.\n\n Returns:\n str | None: The transcript of the audio file or None if the transcription failed.\n \"\"\"\n detect_language = self.config[\"edge_tts\"].get(\"detect_language\")\n\n response_format = (\n \"verbose_json\" # verbose_json will return the language detected in the transcript.\n if self.tts_provider == \"edge_tts\" and detect_language\n else \"json\"\n )\n\n azure_config = None\n if self.stt_provider == \"azure\":\n azure_config = self._get_azure_config(\"whisper\")\n\n transcript = self.openai.transcribe(\n audio_input_wav, response_format=response_format, azure_config=azure_config\n )\n\n locale = None\n # skip the GPT call if we didn't change the language\n if (\n response_format == \"verbose_json\"\n and transcript\n and transcript.language != self.last_transcript_locale # type: ignore\n ):\n printr.print(\n f\" EdgeTTS detected language '{transcript.language}'.\", tags=\"info\" # type: ignore\n )\n locale = self.__ask_gpt_for_locale(transcript.language) # type: ignore\n\n return transcript.text if transcript else None, locale\n\n def _get_azure_config(self, section: str):\n azure_api_key = self.azure_keys[section]\n azure_config = AzureConfig(\n api_key=azure_api_key,\n api_base_url=self.config[\"azure\"]\n .get(section, {})\n .get(\"api_base_url\", None),\n api_version=self.config[\"azure\"].get(section, {}).get(\"api_version\", None),\n deployment_name=self.config[\"azure\"]\n .get(section, {})\n .get(\"deployment_name\", None),\n )\n\n return azure_config\n\n async def _get_response_for_transcript(\n self, transcript: str, locale: str | None\n ) -> tuple[str, str]:\n \"\"\"Gets the response for a given transcript.\n\n This function interprets the transcript, runs instant commands if triggered,\n calls the OpenAI API when needed, processes any tool calls, and generates the final response.\n\n Args:\n transcript (str): The user's spoken text transcribed.\n\n Returns:\n A tuple of strings representing the response to a function call and an instant response.\n \"\"\"\n self.last_transcript_locale = locale\n self._add_user_message(transcript)\n\n instant_response = self._try_instant_activation(transcript)\n if instant_response:\n return instant_response, instant_response\n\n completion = self._gpt_call()\n\n if completion is None:\n return None, None\n\n response_message, tool_calls = self._process_completion(completion)\n\n # do not tamper with this message as it will lead to 400 errors!\n self.messages.append(response_message)\n\n if tool_calls:\n instant_response = await self._handle_tool_calls(tool_calls)\n if instant_response:\n return None, instant_response\n\n summarize_response = self._summarize_function_calls()\n return self._finalize_response(str(summarize_response))\n\n return response_message.content, response_message.content\n\n def _add_user_message(self, content: str):\n \"\"\"Shortens the conversation history if needed and adds a user message to it.\n\n Args:\n content (str): The message content to add.\n role (str): The role of the message sender (\"user\", \"assistant\", \"function\" or \"tool\").\n tool_call_id (Optional[str]): The identifier for the tool call, if applicable.\n name (Optional[str]): The name of the function associated with the tool call, if applicable.\n \"\"\"\n msg = {\"role\": \"user\", \"content\": content}\n self._cleanup_conversation_history()\n self.messages.append(msg)\n\n def _cleanup_conversation_history(self):\n \"\"\"Cleans up the conversation history by removing messages that are too 
old.\"\"\"\n remember_messages = self.config.get(\"features\", {}).get(\n \"remember_messages\", None\n )\n\n if remember_messages is None or len(self.messages) == 0:\n return 0 # Configuration not set, nothing to delete.\n\n # The system message aka `context` does not count\n context_offset = (\n 1 if self.messages and self.messages[0][\"role\"] == \"system\" else 0\n )\n\n # Find the cutoff index where to end deletion, making sure to only count 'user' messages towards the limit starting with newest messages.\n cutoff_index = len(self.messages) - 1\n user_message_count = 0\n for message in reversed(self.messages):\n if self.__get_message_role(message) == \"user\":\n user_message_count += 1\n if user_message_count == remember_messages:\n break # Found the cutoff point.\n cutoff_index -= 1\n\n # If messages below the keep limit, don't delete anything.\n if user_message_count < remember_messages:\n return 0\n\n total_deleted_messages = cutoff_index - context_offset # Messages to delete.\n\n # Remove the messages before the cutoff index, exclusive of the system message.\n del self.messages[context_offset:cutoff_index]\n\n # Optional debugging printout.\n if self.debug and total_deleted_messages > 0:\n printr.print(\n f\"Deleted {total_deleted_messages} messages from the conversation history.\",\n tags=\"warn\",\n )\n\n return total_deleted_messages\n\n def reset_conversation_history(self):\n \"\"\"Resets the conversation history by removing all messages except for the initial system message.\"\"\"\n del self.messages[1:]\n\n def _try_instant_activation(self, transcript: str) -> str:\n \"\"\"Tries to execute an instant activation command if present in the transcript.\n\n Args:\n transcript (str): The transcript to check for an instant activation command.\n\n Returns:\n str: The response to the instant command or None if no such command was found.\n \"\"\"\n command = self._execute_instant_activation_command(transcript)\n if command:\n response = self._select_command_response(command)\n return response\n return None\n\n def _gpt_call(self):\n \"\"\"Makes the primary GPT call with the conversation history and tools enabled.\n\n Returns:\n The GPT completion object or None if the call fails.\n \"\"\"\n if self.debug:\n printr.print(\n f\" Calling GPT with {(len(self.messages) - 1)} messages (excluding context)\",\n tags=\"info\",\n )\n\n azure_config = None\n if self.conversation_provider == \"azure\":\n azure_config = self._get_azure_config(\"conversation\")\n\n return self.openai.ask(\n messages=self.messages,\n tools=self._build_tools(),\n model=self.config[\"openai\"].get(\"conversation_model\"),\n azure_config=azure_config,\n )\n\n def _process_completion(self, completion):\n \"\"\"Processes the completion returned by the GPT call.\n\n Args:\n completion: The completion object from an OpenAI call.\n\n Returns:\n A tuple containing the message response and tool calls from the completion.\n \"\"\"\n response_message = completion.choices[0].message\n\n content = response_message.content\n if content is None:\n response_message.content = \"\"\n\n return response_message, response_message.tool_calls\n\n async def _handle_tool_calls(self, tool_calls):\n \"\"\"Processes all the tool calls identified in the response message.\n\n Args:\n tool_calls: The list of tool calls to process.\n\n Returns:\n str: The immediate response from processed tool calls or None if there are no immediate responses.\n \"\"\"\n instant_response = None\n function_response = \"\"\n\n for tool_call in tool_calls:\n 
function_name = tool_call.function.name\n function_args = json.loads(tool_call.function.arguments)\n (\n function_response,\n instant_response,\n ) = await self._execute_command_by_function_call(\n function_name, function_args\n )\n\n msg = {\"role\": \"tool\", \"content\": function_response}\n if tool_call.id is not None:\n msg[\"tool_call_id\"] = tool_call.id\n if function_name is not None:\n msg[\"name\"] = function_name\n\n # Don't use self._add_user_message_to_history here because we never want to skip this because of history limitions\n self.messages.append(msg)\n\n return instant_response\n\n def _summarize_function_calls(self):\n \"\"\"Summarizes the function call responses using the GPT model specified for summarization in the configuration.\n\n Returns:\n The content of the GPT response to the function call summaries.\n \"\"\"\n azure_config = None\n if self.summarize_provider == \"azure\":\n azure_config = self._get_azure_config(\"summarize\")\n\n summarize_model = self.config[\"openai\"].get(\"summarize_model\")\n summarize_response = self.openai.ask(\n messages=self.messages,\n model=summarize_model,\n azure_config=azure_config,\n )\n\n if summarize_response is None:\n return None\n\n # do not tamper with this message as it will lead to 400 errors!\n message = summarize_response.choices[0].message\n self.messages.append(message)\n return message.content\n\n def _finalize_response(self, summarize_response: str) -> tuple[str, str]:\n \"\"\"Finalizes the response based on the call of the second (summarize) GPT call.\n\n Args:\n summarize_response (str): The response content from the second GPT call.\n\n Returns:\n A tuple containing the final response to the user.\n \"\"\"\n if summarize_response is None:\n return self.messages[-1][\"content\"], self.messages[-1][\"content\"]\n return summarize_response, summarize_response\n\n async def _execute_command_by_function_call(\n self, function_name: str, function_args: dict[str, any]\n ) -> tuple[str, str]:\n \"\"\"\n Uses an OpenAI function call to execute a command. 
If it's an instant activation_command, one if its reponses will be played.\n\n Args:\n function_name (str): The name of the function to be executed.\n function_args (dict[str, any]): The arguments to pass to the function being executed.\n\n Returns:\n A tuple containing two elements:\n - function_response (str): The text response or result obtained after executing the function.\n - instant_response (str): An immediate response or action to be taken, if any (e.g., play audio).\n \"\"\"\n function_response = \"\"\n instant_reponse = \"\"\n if function_name == \"execute_command\":\n # get the command based on the argument passed by GPT\n command = self._get_command(function_args[\"command_name\"])\n # execute the command\n function_response = self._execute_command(command)\n # if the command has responses, we have to play one of them\n if command and command.get(\"responses\"):\n instant_reponse = self._select_command_response(command)\n await self._play_to_user(instant_reponse)\n\n return function_response, instant_reponse\n\n async def _play_to_user(self, text: str):\n \"\"\"Plays audio to the user using the configured TTS Provider (default: OpenAI TTS).\n Also adds sound effects if enabled in the configuration.\n\n Args:\n text (str): The text to play as audio.\n \"\"\"\n\n if self.tts_provider == \"edge_tts\":\n await self._play_with_edge_tts(text)\n elif self.tts_provider == \"elevenlabs\":\n self._play_with_elevenlabs(text)\n elif self.tts_provider == \"azure\":\n self._play_with_azure(text)\n else:\n self._play_with_openai(text)\n\n def _play_with_openai(self, text):\n response = self.openai.speak(text, self.config[\"openai\"].get(\"tts_voice\"))\n if response is not None:\n self.audio_player.stream_with_effects(response.content, self.config)\n\n def _play_with_azure(self, text):\n azure_config = self.config[\"azure\"].get(\"tts\", None)\n\n if azure_config is None:\n return\n\n speech_config = speechsdk.SpeechConfig(\n subscription=self.azure_keys[\"tts\"],\n region=azure_config[\"region\"],\n )\n speech_config.speech_synthesis_voice_name = azure_config[\"voice\"]\n\n if azure_config[\"detect_language\"]:\n auto_detect_source_language_config = (\n speechsdk.AutoDetectSourceLanguageConfig()\n )\n\n speech_synthesizer = speechsdk.SpeechSynthesizer(\n speech_config=speech_config,\n audio_config=None,\n auto_detect_source_language_config=auto_detect_source_language_config\n if azure_config[\"detect_language\"]\n else None,\n )\n\n result = speech_synthesizer.speak_text_async(text).get()\n if result is not None:\n self.audio_player.stream_with_effects(result.audio_data, self.config)\n\n async def _play_with_edge_tts(self, text: str):\n edge_config = self.config[\"edge_tts\"]\n\n tts_voice = edge_config.get(\"tts_voice\")\n detect_language = edge_config.get(\"detect_language\")\n if detect_language:\n gender = edge_config.get(\"gender\")\n tts_voice = await self.edge_tts.get_same_random_voice_for_language(\n gender, self.last_transcript_locale\n )\n\n communicate, output_file = await self.edge_tts.generate_speech(\n text, voice=tts_voice\n )\n audio, sample_rate = self.audio_player.get_audio_from_file(output_file)\n\n self.audio_player.stream_with_effects((audio, sample_rate), self.config)\n\n def _play_with_elevenlabs(self, text: str):\n # presence already validated in validate()\n elevenlabs_config = self.config[\"elevenlabs\"]\n # validate() already checked that either id or name is set\n voice_id = elevenlabs_config[\"voice\"].get(\"id\")\n voice_name = 
elevenlabs_config[\"voice\"].get(\"name\")\n\n voice_settings = elevenlabs_config.get(\"voice_settings\", {})\n user = ElevenLabsUser(self.elevenlabs_api_key)\n model = elevenlabs_config.get(\"model\", \"eleven_multilingual_v2\")\n\n voice: (\n ElevenLabsVoice\n | ElevenLabsDesignedVoice\n | ElevenLabsClonedVoice\n | ElevenLabsProfessionalVoice\n ) = None\n if voice_id:\n voice = user.get_voice_by_ID(voice_id)\n else:\n voice = user.get_voices_by_name(voice_name)[0]\n\n # todo: add start/end callbacks to play Quindar beep even if use_sound_effects is disabled\n playback_options = PlaybackOptions(runInBackground=True)\n generation_options = GenerationOptions(\n model=model,\n latencyOptimizationLevel=elevenlabs_config.get(\"latency\", 0),\n style=voice_settings.get(\"style\", 0),\n use_speaker_boost=voice_settings.get(\"use_speaker_boost\", True),\n )\n stability = voice_settings.get(\"stability\")\n if stability is not None:\n generation_options.stability = stability\n\n similarity_boost = voice_settings.get(\"similarity_boost\")\n if similarity_boost is not None:\n generation_options.similarity_boost = similarity_boost\n\n style = voice_settings.get(\"style\")\n if style is not None and model != \"eleven_turbo_v2\":\n generation_options.style = style\n\n use_sound_effects = elevenlabs_config.get(\"use_sound_effects\", False)\n if use_sound_effects:\n audio_bytes, _history_id = voice.generate_audio_v2(\n prompt=text,\n generationOptions=generation_options,\n )\n if audio_bytes:\n self.audio_player.stream_with_effects(audio_bytes, self.config)\n else:\n voice.generate_stream_audio_v2(\n prompt=text,\n playbackOptions=playback_options,\n generationOptions=generation_options,\n )\n\n def _execute_command(self, command: dict) -> str:\n \"\"\"Does what Wingman base does, but always returns \"Ok\" instead of a command response.\n Otherwise the AI will try to respond to the command and generate a \"duplicate\" response for instant_activation commands.\n \"\"\"\n super()._execute_command(command)\n return \"Ok\"\n\n def _build_tools(self) -> list[dict]:\n \"\"\"\n Builds a tool for each command that is not instant_activation.\n\n Returns:\n list[dict]: A list of tool descriptors in OpenAI format.\n \"\"\"\n commands = [\n command[\"name\"]\n for command in self.config.get(\"commands\", [])\n if not command.get(\"instant_activation\")\n ]\n tools = [\n {\n \"type\": \"function\",\n \"function\": {\n \"name\": \"execute_command\",\n \"description\": \"Executes a command\",\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"command_name\": {\n \"type\": \"string\",\n \"description\": \"The command to execute\",\n \"enum\": commands,\n },\n },\n \"required\": [\"command_name\"],\n },\n },\n },\n ]\n return tools\n\n def __ask_gpt_for_locale(self, language: str) -> str:\n \"\"\"OpenAI TTS returns a natural language name for the language of the transcript, e.g. \"german\" or \"english\".\n This method uses ChatGPT to find the corresponding locale, e.g. \"de-DE\" or \"en-EN\".\n\n Args:\n language (str): The natural, lowercase language name returned by OpenAI TTS. Thank you for that btw.. WTF OpenAI?\n \"\"\"\n\n response = self.openai.ask(\n messages=[\n {\n \"content\": \"\"\"\n I'll say a natural language name in lowercase and you'll just return the IETF country code / locale for this language.\n Your answer always has exactly 2 lowercase letters, a dash, then two more letters in uppercase.\n If I say \"german\", you answer with \"de-DE\". 
If I say \"russian\", you answer with \"ru-RU\".\n If it's ambiguous and you don't know which locale to pick (\"en-GB\" vs \"en-US\"), you pick the most commonly used one.\n You only answer with valid country codes according to most common standards.\n If you can't, you respond with \"None\".\n \"\"\",\n \"role\": \"system\",\n },\n {\n \"content\": language,\n \"role\": \"user\",\n },\n ],\n model=\"gpt-3.5-turbo-1106\",\n )\n answer = response.choices[0].message.content\n\n if answer == \"None\":\n return None\n\n printr.print(\n f\" ChatGPT says this language maps to locale '{answer}'.\", tags=\"info\"\n )\n return answer\n\n def __get_message_role(self, message):\n \"\"\"Helper method to get the role of the message regardless of its type.\"\"\"\n if isinstance(message, Mapping):\n return message.get(\"role\")\n elif hasattr(message, \"role\"):\n return message.role\n else:\n raise TypeError(\n f\"Message is neither a mapping nor has a 'role' attribute: {message}\"\n )" }, { "identifier": "Wingman", "path": "wingmen/wingman.py", "snippet": "class Wingman(FileCreator):\n \"\"\"The \"highest\" Wingman base class in the chain. It does some very basic things but is meant to be 'virtual', and so are most its methods, so you'll probably never instantiate it directly.\n\n Instead, you'll create a custom wingman that inherits from this (or a another subclass of it) and override its methods if needed.\n \"\"\"\n\n def __init__(\n self,\n name: str,\n config: dict[str, Any],\n secret_keeper: SecretKeeper,\n app_root_dir: str,\n ):\n \"\"\"The constructor of the Wingman class. You can override it in your custom wingman.\n\n Args:\n name (str): The name of the wingman. This is the key you gave it in the config, e.g. \"atc\"\n config (dict[str, any]): All \"general\" config entries merged with the specific Wingman config settings. The Wingman takes precedence and overrides the general config. You can just add new keys to the config and they will be available here.\n app_root_dir (str): The path to the root directory of the app. This is where the Wingman executable lives.\n \"\"\"\n\n super().__init__(app_root_dir=app_root_dir, subdir=\"wingman_data\")\n\n self.config = config\n \"\"\"All \"general\" config entries merged with the specific Wingman config settings. The Wingman takes precedence and overrides the general config. You can just add new keys to the config and they will be available here.\"\"\"\n\n self.secret_keeper = secret_keeper\n \"\"\"A service that allows you to store and retrieve secrets like API keys. It can prompt the user for secrets if necessary.\"\"\"\n\n self.name = name\n \"\"\"The name of the wingman. This is the key you gave it in the config, e.g. \"atc\".\"\"\"\n\n self.audio_player = AudioPlayer()\n \"\"\"A service that allows you to play audio files and add sound effects to them.\"\"\"\n\n self.execution_start: None | float = None\n \"\"\"Used for benchmarking executon times. The timer is (re-)started whenever the process function starts.\"\"\"\n\n self.debug: bool = self.config[\"features\"].get(\"debug_mode\", False)\n \"\"\"If enabled, the Wingman will skip executing any keypresses. It will also print more debug messages and benchmark results.\"\"\"\n\n self.tts_provider = self.config[\"features\"].get(\"tts_provider\")\n \"\"\"The name of the TTS provider you configured in the config.yaml\"\"\"\n\n self.app_root_dir = app_root_dir\n \"\"\"The path to the root directory of the app. 
This is where the Wingman executable lives.\"\"\"\n\n @staticmethod\n def create_dynamically(\n module_path: str,\n class_name: str,\n name: str,\n config: dict[str, Any],\n secret_keeper: SecretKeeper,\n app_root_dir: str,\n **kwargs,\n ):\n \"\"\"Dynamically creates a Wingman instance from a module path and class name\n\n Args:\n module_path (str): The module path, e.g. wingmen.open_ai_wingman. It's like the filepath from root to your custom-wingman.py but with dots instead of slashes and without the .py extension. Case-sensitive!\n class_name (str): The name of the class inside your custom-wingman.py, e.g. OpenAiWingman. Case-sensitive!\n name (str): The name of the wingman. This is the key you gave it in the config, e.g. \"atc\"\n config (dict[str, any]): All \"general\" config entries merged with the specific Wingman config settings. The Wingman takes precedence and overrides the general config. You can just add new keys to the config and they will be available here.\n \"\"\"\n\n module = import_module(module_path)\n DerivedWingmanClass = getattr(module, class_name)\n instance = DerivedWingmanClass(\n name=name,\n config=config,\n secret_keeper=secret_keeper,\n app_root_dir=app_root_dir,\n **kwargs,\n )\n return instance\n\n def get_record_key(self) -> str:\n \"\"\"Returns the activation or \"push-to-talk\" key for this Wingman.\"\"\"\n return self.config.get(\"record_key\", None)\n\n def print_execution_time(self, reset_timer=False):\n \"\"\"Prints the current time since the execution started (in seconds).\"\"\"\n if self.execution_start:\n execution_stop = time.perf_counter()\n elapsed_seconds = execution_stop - self.execution_start\n printr.print(f\"...took {elapsed_seconds:.2f}s\", tags=\"info\")\n if reset_timer:\n self.start_execution_benchmark()\n\n def start_execution_benchmark(self):\n \"\"\"Starts the execution benchmark timer.\"\"\"\n self.execution_start = time.perf_counter()\n\n # ──────────────────────────────────── Hooks ─────────────────────────────────── #\n\n def validate(self) -> list[str]:\n \"\"\"Use this function to validate params and config before the Wingman is started.\n If you add new config sections or entries to your custom wingman, you should validate them here.\n\n It's a good idea to collect all errors from the base class and not to swallow them first.\n\n If you return errors, your Wingman will be disabled by Tower and not be loaded.\n\n Returns:\n list[str]: A list of error messages or an empty list if everything is okay.\n \"\"\"\n return []\n\n # TODO: this should be async\n def prepare(self):\n \"\"\"This method is called only once when the Wingman is instantiated by Tower.\n It is run AFTER validate() so you can access validated params safely here.\n\n You can override it if you need to load async data from an API or file.\"\"\"\n pass\n\n def reset_conversation_history(self):\n \"\"\"This function is called when the user triggers the ResetConversationHistory command.\n It's a global command that should be implemented by every Wingman that keeps a message history.\n \"\"\"\n\n # ──────────────────────────── The main processing loop ──────────────────────────── #\n\n async def process(self, audio_input_wav: str):\n \"\"\"The main method that gets called when the wingman is activated. 
This method controls what your wingman actually does and you can override it if you want to.\n\n The base implementation here triggers the transcription and processing of the given audio input.\n If you don't need even transcription, you can just override this entire process method. If you want transcription but then do something in addition, you can override the listed hooks.\n\n Async so you can do async processing, e.g. send a request to an API.\n\n Args:\n audio_input_wav (str): The path to the audio file that contains the user's speech. This is a recording of what you you said.\n\n Hooks:\n - async _transcribe: transcribe the audio to text\n - async _get_response_for_transcript: process the transcript and return a text response\n - async _play_to_user: do something with the response, e.g. play it as audio\n \"\"\"\n\n self.start_execution_benchmark()\n\n process_result = None\n\n if self.debug:\n printr.print(\"Starting transcription...\", tags=\"info\")\n\n # transcribe the audio.\n transcript, locale = await self._transcribe(audio_input_wav)\n\n if self.debug:\n self.print_execution_time(reset_timer=True)\n\n if transcript:\n printr.print(f\">> (You): {transcript}\", tags=\"violet\")\n\n if self.debug:\n printr.print(\"Getting response for transcript...\", tags=\"info\")\n\n # process the transcript further. This is where you can do your magic. Return a string that is the \"answer\" to your passed transcript.\n process_result, instant_response = await self._get_response_for_transcript(\n transcript, locale\n )\n\n if self.debug:\n self.print_execution_time(reset_timer=True)\n\n actual_response = instant_response or process_result\n printr.print(f\"<< ({self.name}): {actual_response}\", tags=\"green\")\n\n if self.debug:\n printr.print(\"Playing response back to user...\", tags=\"info\")\n\n # the last step in the chain. You'll probably want to play the response to the user as audio using a TTS provider or mechanism of your choice.\n await self._play_to_user(str(process_result))\n\n if self.debug:\n self.print_execution_time()\n\n # ───────────────── virtual methods / hooks ───────────────── #\n\n async def _transcribe(self, audio_input_wav: str) -> tuple[str | None, str | None]:\n \"\"\"Transcribes the audio to text. You can override this method if you want to use a different transcription service.\n\n Args:\n audio_input_wav (str): The path to the audio file that contains the user's speech. This is a recording of what you you said.\n\n Returns:\n tuple[str | None, str | None]: The transcript of the audio file and the detected language as locale (if determined).\n \"\"\"\n return None, None\n\n async def _get_response_for_transcript(\n self, transcript: str, locale: str | None\n ) -> tuple[str, str]:\n \"\"\"Processes the transcript and return a response as text. This where you'll do most of your work.\n Pass the transcript to AI providers and build a conversation. Call commands or APIs. Play temporary results to the user etc.\n\n\n Args:\n transcript (str): The user's spoken text transcribed as text.\n locale (str | None): The language that was detected to be used in the transcript, e.g. \"de-DE\".\n\n Returns:\n A tuple of strings representing the response to a function call and/or an instant response.\n \"\"\"\n return (\"\", \"\")\n\n async def _play_to_user(self, text: str):\n \"\"\"You'll probably want to play the response to the user as audio using a TTS provider or mechanism of your choice.\n\n Args:\n text (str): The response of your _get_response_for_transcript. 
This is usually the \"response\" from conversation with the AI.\n \"\"\"\n pass\n\n # ───────────────────────────────── Commands ─────────────────────────────── #\n\n def _get_command(self, command_name: str) -> dict | None:\n \"\"\"Extracts the command with the given name\n\n Args:\n command_name (str): the name of the command you used in the config\n\n Returns:\n {}: The command object from the config\n \"\"\"\n\n command = next(\n (\n item\n for item in self.config.get(\"commands\", [])\n if item[\"name\"] == command_name\n ),\n None,\n )\n return command\n\n def _select_command_response(self, command: dict) -> str | None:\n \"\"\"Returns one of the configured responses of the command. This base implementation returns a random one.\n\n Args:\n command (dict): The command object from the config\n\n Returns:\n str: A random response from the command's responses list in the config.\n \"\"\"\n command_responses = command.get(\"responses\", None)\n if (command_responses is None) or (len(command_responses) == 0):\n return None\n\n return random.choice(command_responses)\n\n def _execute_instant_activation_command(self, transcript: str) -> dict | None:\n \"\"\"Uses a fuzzy string matching algorithm to match the transcript to a configured instant_activation command and executes it immediately.\n\n Args:\n transcript (text): What the user said, transcripted to text. Needs to be similar to one of the defined instant_activation phrases to work.\n\n Returns:\n {} | None: The executed instant_activation command.\n \"\"\"\n\n instant_activation_commands = [\n command\n for command in self.config.get(\"commands\", [])\n if command.get(\"instant_activation\")\n ]\n\n # check if transcript matches any instant activation command. Each command has a list of possible phrases\n for command in instant_activation_commands:\n for phrase in command.get(\"instant_activation\"):\n ratio = SequenceMatcher(\n None,\n transcript.lower(),\n phrase.lower(),\n ).ratio()\n if (\n ratio > 0.8\n ): # if the ratio is higher than 0.8, we assume that the command was spoken\n self._execute_command(command)\n\n if command.get(\"responses\"):\n return command\n return None\n return None\n\n def _execute_command(self, command: dict) -> str:\n \"\"\"Triggers the execution of a command. This base implementation executes the keypresses defined in the command.\n\n Args:\n command (dict): The command object from the config to execute\n\n Returns:\n str: the selected response from the command's responses list in the config. \"Ok\" if there are none.\n \"\"\"\n\n if not command:\n return \"Command not found\"\n\n printr.print(f\"❖ Executing command: {command.get('name')}\", tags=\"info\")\n\n if self.debug:\n printr.print(\n \"Skipping actual keypress execution in debug_mode...\", tags=\"warn\"\n )\n\n if len(command.get(\"keys\", [])) > 0 and not self.debug:\n self.execute_keypress(command)\n # TODO: we could do mouse_events here, too...\n\n # handle the global special commands:\n if command.get(\"name\", None) == \"ResetConversationHistory\":\n self.reset_conversation_history()\n\n if not self.debug:\n # in debug mode we already printed the separate execution times\n self.print_execution_time()\n\n return self._select_command_response(command) or \"Ok\"\n\n def execute_keypress(self, command: dict):\n \"\"\"Executes the keypresses defined in the command in order.\n\n pydirectinput uses SIGEVENTS to send keypresses to the OS. This lib seems to be the only way to send keypresses to games reliably.\n\n It only works on Windows. 
For MacOS, we fall back to PyAutoGUI (which has the exact same API as pydirectinput is built on top of it).\n\n Args:\n command (dict): The command object from the config to execute\n \"\"\"\n\n for entry in command.get(\"keys\", []):\n if entry.get(\"modifier\"):\n key_module.keyDown(entry[\"modifier\"])\n\n if entry.get(\"hold\"):\n key_module.keyDown(entry[\"key\"])\n time.sleep(entry[\"hold\"])\n key_module.keyUp(entry[\"key\"])\n else:\n key_module.press(entry[\"key\"])\n\n if entry.get(\"modifier\"):\n key_module.keyUp(entry[\"modifier\"])\n\n if entry.get(\"wait\"):\n time.sleep(entry[\"wait\"])" }, { "identifier": "Printr", "path": "services/printr.py", "snippet": "class Printr(object):\n _instance = None\n\n LILA = \"\\033[95m\"\n BLUE = \"\\033[94m\"\n CYAN = \"\\033[96m\"\n GREEN = \"\\033[92m\"\n YELLOW = \"\\033[93m\"\n RED = \"\\033[91m\"\n CLEAR = \"\\033[0m\"\n BOLD = \"\\033[1m\"\n FAINT = \"\\033[2m\"\n NORMAL_WEIGHT = \"\\033[22m\"\n UNDERLINE = \"\\033[4m\"\n END_UNDERLINE = \"\\033[24m\"\n OVERLINE = \"\\033[53m\"\n END_OVERLINE = \"\\033[55m\"\n FRAMED = \"\\033[51m\"\n ENCIRCLED = \"\\033[52m\"\n DELETE_LINE = \"\\033[2K\\033[1G\"\n PREVIOUS_LINE = \"\\033[2F\"\n\n tags = [\n # {\"tagName\": \"bold\", \"font\": \"TkTextFont bold\"},\n {\"tagName\": \"info\", \"foreground\": \"#6699ff\"},\n {\"tagName\": \"warn\", \"foreground\": \"orange\"},\n {\"tagName\": \"err\", \"foreground\": \"red\"},\n\n {\"tagName\": \"green\", \"foreground\": \"#33cc33\"},\n {\"tagName\": \"blue\", \"foreground\": \"#6699ff\"},\n {\"tagName\": \"violet\", \"foreground\": \"#aa33dd\"},\n {\"tagName\": \"grey\", \"foreground\": \"grey\"}\n ]\n\n CHANNEL = Literal[\"main\", \"error\", \"warning\", \"info\"]\n OUTPUT_TYPES = None | ctk.StringVar | ctk.CTkTextbox\n\n _message_stacks: dict[CHANNEL, list] = dict(\n main=[],\n error=[],\n warning=[],\n info=[]\n )\n\n # NOTE this is a singleton class\n def __new__(cls):\n if cls._instance is None:\n cls._instance = super(Printr, cls).__new__(cls)\n\n cls.out: dict[Printr.CHANNEL, Printr.OUTPUT_TYPES ] = dict(\n main=None,\n error=None,\n warning=None,\n info=None\n )\n return cls._instance\n\n\n def set_output(self, output_channel: CHANNEL, output_element: OUTPUT_TYPES):\n if isinstance(output_element, ctk.CTkTextbox):\n for tag in self.tags:\n output_element.tag_config(**tag)\n\n self.out[output_channel] = output_element\n\n msg_stack = self._message_stacks.get(output_channel, [])\n if len(msg_stack) > 0:\n msg = \"\\n\".join(msg_stack)\n self.print(msg, output_channel)\n # TODO: clear stack?\n for _ in range(len(msg_stack)):\n msg_stack.pop()\n\n\n\n def print(self, text, output_channel: CHANNEL = \"main\", tags=None, wait_for_gui=False, console_only=False):\n channel = self.out.get(output_channel, None)\n if channel and not console_only:\n if isinstance(channel, ctk.CTkTextbox):\n channel.configure(state=\"normal\")\n channel.insert(\"end\", f\"{text}\\n\", tags=tags)\n channel.see(\"end\")\n channel.configure(state=\"disabled\")\n else:\n # output type -> StringVar\n channel.set(text)\n elif wait_for_gui and not console_only:\n # message should only be shown in GUI\n # so add it to the queue to wait for GUI initialization\n self._message_stacks.get(output_channel, []).append(text)\n else:\n # no special output type -> terminal output\n print(text)\n\n\n def print_err(self, text, wait_for_gui=True):\n self.print(text, output_channel=\"error\", wait_for_gui=wait_for_gui)\n\n def print_warn(self, text, wait_for_gui=True):\n self.print(text, 
output_channel=\"warning\", wait_for_gui=wait_for_gui)\n\n def print_info(self, text, wait_for_gui=True):\n self.print(text, output_channel=\"info\", wait_for_gui=wait_for_gui)\n\n\n @staticmethod\n def clr(text, color_format):\n return f\"{color_format}{text}{Printr.CLEAR}\"\n\n @staticmethod\n def clr_print(text, color_format):\n print(Printr.clr(text, color_format))\n\n @staticmethod\n def sys_print(text, headline=\"\", color=RED, first_message=True):\n if first_message:\n print(\"\")\n if headline.strip():\n print(\n Printr.clr(f\"{Printr.BOLD}{headline}{Printr.NORMAL_WEIGHT}\", color)\n )\n else:\n print(Printr.PREVIOUS_LINE)\n print(Printr.clr(f\"⎢ {text}\", color))\n print(\"\")\n\n @staticmethod\n def err_print(text, first_message=True):\n Printr.sys_print(text, \"Something went wrong!\", first_message=first_message)\n\n @staticmethod\n def warn_print(text, first_message=True):\n Printr.sys_print(text, \"Please note:\", Printr.YELLOW, first_message)\n\n @staticmethod\n def info_print(text, first_message=True):\n Printr.sys_print(text, \"\", Printr.BLUE, first_message)\n\n @staticmethod\n def hl_print(text, first_message=True):\n Printr.sys_print(text, \"\", Printr.CYAN, first_message)\n\n @staticmethod\n def override_print(text):\n print(f\"{Printr.DELETE_LINE}{text}\")\n\n @staticmethod\n def box_start():\n print(\n f\"{Printr.CYAN}⎡{Printr.OVERLINE}⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊{Printr.END_OVERLINE}⎤\"\n )\n print(f\"⎢{Printr.CLEAR}\")\n\n @staticmethod\n def box_end():\n print(f\"{Printr.CYAN}⎢\")\n print(\n f\"⎣{Printr.UNDERLINE}⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊⑊{Printr.END_UNDERLINE}⎦{Printr.CLEAR}\"\n )\n\n @staticmethod\n def box_print(text):\n print(f\"{Printr.CYAN}⎜{Printr.CLEAR} {text}\")" }, { "identifier": "SecretKeeper", "path": "services/secret_keeper.py", "snippet": "class SecretKeeper:\n def __init__(self, app_root_path: str):\n self.printr = Printr()\n self.system_config_path: str = os.path.join(app_root_path, SYSTEM_CONFIG_PATH)\n self.config_file = os.path.join(self.system_config_path, SECRETS_FILE)\n self.secrets = self.__load()\n if not self.secrets:\n self.secrets = {}\n\n def __load(self) -> dict[str, any]: # type: ignore\n parsed_config = None\n\n if os.path.exists(self.config_file) and os.path.isfile(self.config_file):\n with open(self.config_file, \"r\", encoding=\"UTF-8\") as stream:\n try:\n parsed_config = yaml.safe_load(stream)\n except yaml.YAMLError as e:\n self.printr.print_err(\n f\"Could not load ({SECRETS_FILE})\\n{str(e)}\", True\n )\n\n return parsed_config\n\n def save(self):\n \"\"\"Write all secrets to the file\"\"\"\n with open(self.config_file, \"w\", encoding=\"UTF-8\") as stream:\n try:\n yaml.dump(self.secrets, stream)\n return True\n except yaml.YAMLError as e:\n self.printr.print_err(\n f\"Could not write ({SECRETS_FILE})\\n{str(e)}\", True\n )\n return False\n\n def retrieve(\n self,\n requester: str,\n key: str,\n friendly_key_name: str,\n prompt_if_missing: bool = True,\n ) -> str:\n \"\"\"Retrieve secret a secret and optionally prompt user for it if missing\"\"\"\n\n secret = self.secrets.get(key, None)\n if not secret and prompt_if_missing:\n # Prompt user for key\n dialog = ctk.CTkInputDialog(\n text=f\"Please enter '{friendly_key_name}':\",\n title=f\"{requester} needs to know a secret\",\n )\n secret = dialog.get_input()\n if secret:\n secret = secret.strip().replace(\"\\n\", 
\"\")\n self.secrets[key] = secret\n self.save()\n\n return secret" } ]
import copy
from exceptions import MissingApiKeyException
from wingmen.open_ai_wingman import OpenAiWingman
from wingmen.wingman import Wingman
from services.printr import Printr
from services.secret_keeper import SecretKeeper
12,682
printr = Printr() class Tower: def __init__(self, config: dict[str, any], secret_keeper: SecretKeeper, app_root_dir: str): # type: ignore self.config = config self.app_root_dir = app_root_dir self.secret_keeper = secret_keeper self.key_wingman_dict: dict[str, Wingman] = {} self.broken_wingmen = [] self.wingmen = self.__instantiate_wingmen() self.key_wingman_dict: dict[str, Wingman] = {} for wingman in self.wingmen: self.key_wingman_dict[wingman.get_record_key()] = wingman def __instantiate_wingmen(self) -> list[Wingman]: wingmen = [] for wingman_name, wingman_config in self.config["wingmen"].items(): if wingman_config.get("disabled") is True: continue global_config = { "sound": self.config.get("sound", {}), "openai": self.config.get("openai", {}), "features": self.config.get("features", {}), "edge_tts": self.config.get("edge_tts", {}), "commands": self.config.get("commands", {}), "elevenlabs": self.config.get("elevenlabs", {}), "azure": self.config.get("azure", {}), } merged_config = self.__merge_configs(global_config, wingman_config) class_config = merged_config.get("class") wingman = None # it's a custom Wingman try: if class_config: kwargs = class_config.get("args", {}) wingman = Wingman.create_dynamically( name=wingman_name, config=merged_config, secret_keeper=self.secret_keeper, module_path=class_config.get("module"), class_name=class_config.get("name"), app_root_dir=self.app_root_dir, **kwargs ) else: wingman = OpenAiWingman( name=wingman_name, config=merged_config, secret_keeper=self.secret_keeper, app_root_dir=self.app_root_dir, )
printr = Printr() class Tower: def __init__(self, config: dict[str, any], secret_keeper: SecretKeeper, app_root_dir: str): # type: ignore self.config = config self.app_root_dir = app_root_dir self.secret_keeper = secret_keeper self.key_wingman_dict: dict[str, Wingman] = {} self.broken_wingmen = [] self.wingmen = self.__instantiate_wingmen() self.key_wingman_dict: dict[str, Wingman] = {} for wingman in self.wingmen: self.key_wingman_dict[wingman.get_record_key()] = wingman def __instantiate_wingmen(self) -> list[Wingman]: wingmen = [] for wingman_name, wingman_config in self.config["wingmen"].items(): if wingman_config.get("disabled") is True: continue global_config = { "sound": self.config.get("sound", {}), "openai": self.config.get("openai", {}), "features": self.config.get("features", {}), "edge_tts": self.config.get("edge_tts", {}), "commands": self.config.get("commands", {}), "elevenlabs": self.config.get("elevenlabs", {}), "azure": self.config.get("azure", {}), } merged_config = self.__merge_configs(global_config, wingman_config) class_config = merged_config.get("class") wingman = None # it's a custom Wingman try: if class_config: kwargs = class_config.get("args", {}) wingman = Wingman.create_dynamically( name=wingman_name, config=merged_config, secret_keeper=self.secret_keeper, module_path=class_config.get("module"), class_name=class_config.get("name"), app_root_dir=self.app_root_dir, **kwargs ) else: wingman = OpenAiWingman( name=wingman_name, config=merged_config, secret_keeper=self.secret_keeper, app_root_dir=self.app_root_dir, )
except MissingApiKeyException:
0
2023-11-15 09:36:06+00:00
16k
wjun0830/CGDETR
cg_detr/inference.py
[ { "identifier": "AverageMeter", "path": "utils/basic_utils.py", "snippet": "class AverageMeter(object):\n \"\"\"Computes and stores the average and current/max/min value\"\"\"\n def __init__(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n self.max = -1e10\n self.min = 1e10\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n self.max = -1e10\n self.min = 1e10\n\n def update(self, val, n=1):\n self.max = max(val, self.max)\n self.min = min(val, self.min)\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count" }, { "identifier": "TestOptions", "path": "cg_detr/config.py", "snippet": "class TestOptions(BaseOptions):\n \"\"\"add additional options for evaluating\"\"\"\n\n def initialize(self):\n BaseOptions.initialize(self)\n # also need to specify --eval_split_name\n self.parser.add_argument(\"--eval_id\", type=str, help=\"evaluation id\")\n self.parser.add_argument(\"--eval_results_dir\", type=str, default=None,\n help=\"dir to save results, if not set, fall back to training results_dir\")\n self.parser.add_argument(\"--model_dir\", type=str,\n help=\"dir contains the model file, will be converted to absolute path afterwards\")" }, { "identifier": "build_model", "path": "cg_detr/model.py", "snippet": "def build_model(args):\n device = torch.device(args.device)\n\n transformer = build_transformer(args)\n position_embedding, txt_position_embedding = build_position_encoding(args)\n\n if args.a_feat_dir is None:\n model = CGDETR(\n transformer,\n position_embedding,\n txt_position_embedding,\n txt_dim=args.t_feat_dim,\n vid_dim=args.v_feat_dim,\n num_queries=args.num_queries,\n input_dropout=args.input_dropout,\n aux_loss=args.aux_loss,\n contrastive_align_loss=args.contrastive_align_loss,\n contrastive_hdim=args.contrastive_hdim,\n span_loss_type=args.span_loss_type,\n use_txt_pos=args.use_txt_pos,\n n_input_proj=args.n_input_proj,\n args=args\n )\n else:\n model = CGDETR(\n transformer,\n position_embedding,\n txt_position_embedding,\n txt_dim=args.t_feat_dim,\n vid_dim=args.v_feat_dim,\n aud_dim=args.a_feat_dim,\n num_queries=args.num_queries,\n input_dropout=args.input_dropout,\n aux_loss=args.aux_loss,\n contrastive_align_loss=args.contrastive_align_loss,\n contrastive_hdim=args.contrastive_hdim,\n span_loss_type=args.span_loss_type,\n use_txt_pos=args.use_txt_pos,\n n_input_proj=args.n_input_proj,\n args=args\n )\n\n matcher = build_matcher(args)\n weight_dict = {\"loss_span\": args.span_loss_coef,\n \"loss_giou\": args.giou_loss_coef,\n \"loss_label\": args.label_loss_coef,\n \"loss_saliency\": args.lw_saliency,\n \"loss_ms_align\": args.lw_ms_align,\n \"loss_distill\": args.lw_distill,\n \"loss_orthogonal_dummy\":args.lw_distill}\n if args.contrastive_align_loss:\n weight_dict[\"loss_contrastive_align\"] = args.contrastive_align_loss_coef\n\n if args.aux_loss:\n aux_weight_dict = {}\n for i in range(args.dec_layers - 1):\n aux_weight_dict.update({k + f'_{i}': v for k, v in weight_dict.items() if k != \"loss_saliency\"})\n weight_dict.update(aux_weight_dict)\n\n losses = ['spans', 'labels', 'saliency', 'ms_align', 'distill', 'orthogonal_dummy']\n if args.contrastive_align_loss:\n losses += [\"contrastive_align\"]\n \n # For highlight detection datasets\n use_matcher = not (args.dset_name in ['youtube_uni', 'tvsum'])\n \n criterion = SetCriterion(\n matcher=matcher, weight_dict=weight_dict, losses=losses,\n eos_coef=args.eos_coef, temperature=args.temperature,\n 
span_loss_type=args.span_loss_type, max_v_l=args.max_v_l,\n saliency_margin=args.saliency_margin, use_matcher=use_matcher, args=args\n )\n criterion.to(device)\n return model, criterion" }, { "identifier": "span_cxw_to_xx", "path": "cg_detr/span_utils.py", "snippet": "def span_cxw_to_xx(cxw_spans):\n \"\"\"\n Args:\n cxw_spans: tensor, (#windows, 2) or (..., 2), the last dim is a row denoting a window of format (center, width)\n\n >>> spans = torch.Tensor([[0.5000, 1.0000], [0.3000, 0.2000]])\n >>> span_cxw_to_xx(spans)\n tensor([[0.0000, 1.0000],\n [0.2000, 0.4000]])\n >>> spans = torch.Tensor([[[0.5000, 1.0000], [0.3000, 0.2000]]])\n >>> span_cxw_to_xx(spans)\n tensor([[[0.0000, 1.0000],\n [0.2000, 0.4000]]])\n \"\"\"\n x1 = cxw_spans[..., 0] - 0.5 * cxw_spans[..., 1]\n x2 = cxw_spans[..., 0] + 0.5 * cxw_spans[..., 1]\n return torch.stack([x1, x2], dim=-1)" }, { "identifier": "StartEndDataset", "path": "cg_detr/start_end_dataset.py", "snippet": "class StartEndDataset(Dataset):\n Q_FEAT_TYPES = [\"pooler_output\", \"last_hidden_state\"]\n \"\"\"One line in data loaded from data_path.\"\n {\n \"qid\": 7803,\n \"query\": \"Man in gray top walks from outside to inside.\",\n \"duration\": 150,\n \"vid\": \"RoripwjYFp8_360.0_510.0\",\n \"relevant_clip_ids\": [13, 14, 15, 16, 17],\n \"relevant_windows\": [[26, 36]]\n }\n \"\"\"\n\n def __init__(self, dset_name, data_path, v_feat_dirs, q_feat_dir,\n q_feat_type=\"last_hidden_state\",\n max_q_l=32, max_v_l=75, data_ratio=1.0, ctx_mode=\"video\",\n normalize_v=True, normalize_t=True, load_labels=True,\n clip_len=2, max_windows=5, span_loss_type=\"l1\", txt_drop_ratio=0,\n dset_domain=None):\n self.dset_name = dset_name\n self.data_path = data_path\n self.data_ratio = data_ratio\n self.v_feat_dirs = v_feat_dirs \\\n if isinstance(v_feat_dirs, list) else [v_feat_dirs]\n self.q_feat_dir = q_feat_dir\n self.q_feat_type = q_feat_type\n if max_v_l == -1:\n max_v_l = 100000000\n if max_q_l == -1:\n max_q_l = 100\n self.max_q_l = max_q_l\n self.max_v_l = max_v_l\n self.ctx_mode = ctx_mode\n self.use_tef = \"tef\" in ctx_mode\n self.use_video = \"video\" in ctx_mode\n self.normalize_t = normalize_t\n self.normalize_v = normalize_v\n self.load_labels = load_labels\n self.clip_len = clip_len\n self.max_windows = max_windows # maximum number of windows to use as labels\n self.span_loss_type = span_loss_type\n self.txt_drop_ratio = txt_drop_ratio\n if \"val\" in data_path or \"test\" in data_path:\n assert txt_drop_ratio == 0\n\n\n # checks\n assert q_feat_type in self.Q_FEAT_TYPES\n\n # data\n self.data = self.load_data()\n \n # load specific domain data for tvsum dataset\n if self.dset_name in ['tvsum', 'tvsum_sfc']:\n target_domain = dset_domain\n assert target_domain in [\"BK\", \"BT\", \"DS\", \"FM\", \"GA\", \"MS\", \"PK\", \"PR\", \"VT\", \"VU\"]\n\n new_data = []\n for d in self.data:\n if target_domain == d['domain']:\n new_data.append(d)\n self.data = new_data\n \n # load specific domain data for youtube-hl dataset\n if self.dset_name == 'youtube_uni':\n target_domain = dset_domain\n assert target_domain in [\"dog\", \"gymnastics\", \"parkour\", \"skating\", \"skiing\", \"surfing\"]\n \n new_data = []\n for d in self.data:\n if target_domain == d['domain']:\n new_data.append(d)\n self.data = new_data \n \n self.use_glove = False\n self.use_glove = 'vgg' in self.v_feat_dirs[0]\n\n if self.dset_name == 'charadesSTA' and self.use_glove:\n self.vocab = vocab.pretrained_aliases['glove.6B.300d']()\n self.vocab.itos.extend(['<unk>'])\n 
self.vocab.stoi['<unk>'] = self.vocab.vectors.shape[0]\n self.vocab.vectors = torch.cat(\n (self.vocab.vectors, torch.zeros(1, self.vocab.dim)), dim=0)\n self.embedding = nn.Embedding.from_pretrained(self.vocab.vectors)\n \n\n def load_data(self):\n datalist = load_jsonl(self.data_path)\n if self.data_ratio != 1:\n n_examples = int(len(datalist) * self.data_ratio)\n datalist = datalist[:n_examples]\n logger.info(\"Using {}% of the data: {} examples\"\n .format(self.data_ratio * 100, n_examples))\n return datalist\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, index):\n meta = self.data[index]\n\n model_inputs = dict()\n\n if self.use_glove:\n model_inputs[\"query_feat\"] = self.get_query(meta[\"query\"])\n else:\n model_inputs[\"query_feat\"] = self._get_query_feat_by_qid(meta[\"qid\"]) # (Dq, ) or (Lq, Dq)\n \n if self.use_video:\n model_inputs[\"video_feat\"] = self._get_video_feat_by_vid(meta[\"vid\"]) # (Lv, Dv)\n ctx_l = len(model_inputs[\"video_feat\"])\n else:\n ctx_l = self.max_v_l\n\n\n if self.use_tef:\n tef_st = torch.arange(0, ctx_l, 1.0) / ctx_l\n tef_ed = tef_st + 1.0 / ctx_l\n tef = torch.stack([tef_st, tef_ed], dim=1) # (Lv, 2)\n if self.use_video:\n model_inputs[\"video_feat\"] = torch.cat(\n [model_inputs[\"video_feat\"], tef], dim=1) # (Lv, Dv+2)\n else:\n model_inputs[\"video_feat\"] = tef\n\n\n if self.dset_name in ['tvsum']:\n model_inputs[\"span_labels\"] = torch.tensor([[0., 0.]])\n meta_label = meta['label']\n model_inputs[\"saliency_pos_labels\"], model_inputs[\"saliency_neg_labels\"], model_inputs[\"saliency_all_labels\"] = \\\n self.get_saliency_labels_all_tvsum(meta_label, ctx_l)\n if len(model_inputs[\"saliency_all_labels\"]) != len(model_inputs[\"video_feat\"]):\n model_inputs[\"video_feat\"] = model_inputs[\"video_feat\"][:len(model_inputs[\"saliency_all_labels\"])]\n\n elif self.dset_name == 'youtube_uni':\n model_inputs[\"span_labels\"] = torch.tensor([[0., 0.]])\n meta_label = meta['label']\n model_inputs[\"saliency_pos_labels\"], model_inputs[\"saliency_neg_labels\"], model_inputs[\"saliency_all_labels\"] = \\\n self.get_saliency_labels_all_youtube(meta_label, ctx_l)\n else:\n if \"relevant_windows\" in meta: ## For Qvhighlights test set\n model_inputs[\"span_labels\"] = self.get_span_labels(meta[\"relevant_windows\"], ctx_l) # (#windows, 2)\n if self.dset_name in ['charadesSTA', 'tacos', 'activitynet']: ## charades, tacos, nlq\n model_inputs[\"saliency_pos_labels\"], model_inputs[\"saliency_neg_labels\"], model_inputs[\"saliency_all_labels\"] = \\\n self.get_saliency_labels_sub_as_query(meta[\"relevant_windows\"][0], meta[\"duration\"], ctx_l) # only one gt\n elif self.dset_name in ['nlq']:\n model_inputs[\"saliency_pos_labels\"], model_inputs[\"saliency_neg_labels\"], model_inputs[\"saliency_all_labels\"] = \\\n self.get_saliency_labels_sub_as_query(meta[\"relevant_windows\"][0], meta[\"duration\"], ctx_l, 2) # only one gt\n elif \"subs_train\" not in self.data_path:\n model_inputs[\"saliency_pos_labels\"], model_inputs[\"saliency_neg_labels\"], model_inputs[\"saliency_all_labels\"] = \\\n self.get_saliency_labels_all(meta[\"relevant_clip_ids\"], meta[\"saliency_scores\"], ctx_l)\n else:\n model_inputs[\"saliency_pos_labels\"], model_inputs[\"saliency_neg_labels\"], model_inputs[\n \"saliency_all_labels\"] = \\\n self.get_saliency_labels_sub_as_query(meta[\"relevant_windows\"][0], meta[\"duration\"], ctx_l) # only one gt\n\n if 'qvhighlight' in self.data_path:\n model_inputs[\"relevant_clip_ids\"] = 
meta[\"relevant_clip_ids\"]\n model_inputs[\"vid\"] = meta[\"vid\"]\n model_inputs[\"qid\"] = meta[\"qid\"]\n return dict(meta=meta, model_inputs=model_inputs)\n\n def get_query(self, query):\n word_inds = torch.LongTensor(\n [self.vocab.stoi.get(w.lower(), 400000) for w in query.split()])\n return self.embedding(word_inds)\n\n def get_saliency_labels_sub_as_query(self, gt_window, duration, ctx_l, max_n=2):\n clip_len = duration / ctx_l\n gt_st = int(gt_window[0] / clip_len)\n gt_ed = max(0, min(int(gt_window[1] / clip_len), ctx_l) - 1)\n if gt_st > gt_ed:\n gt_st = gt_ed\n\n if gt_st != gt_ed:\n pos_clip_indices = random.sample(range(gt_st, gt_ed + 1), k=max_n)\n else:\n if self.dset_name == 'nlq':\n pos_clip_indices = [gt_st] * 2\n else:\n pos_clip_indices = [gt_st, gt_st]\n\n neg_pool = list(range(0, gt_st)) + list(range(gt_ed+1, ctx_l))\n try:\n neg_clip_indices = random.sample(neg_pool, k=max_n)\n except:\n neg_clip_indices = pos_clip_indices\n\n # For charades_sta\n score_array = np.zeros(ctx_l)\n score_array[gt_st:gt_ed + 1] = 1\n\n return pos_clip_indices, neg_clip_indices, score_array\n \n\n def get_saliency_labels(self, rel_clip_ids, scores, ctx_l, max_n=1, add_easy_negative=True):\n \"\"\"Sum the scores from the three annotations, then take the two clips with the\n maximum scores as positive, and two with the minimum scores as negative.\n Args:\n rel_clip_ids: list(int), list of relevant clip ids\n scores: list([anno1_score, anno2_score, anno3_score]),\n ctx_l: int\n max_n: int, #clips to use as positive and negative, for easy and hard negative, respectively.\n add_easy_negative: bool, if True, sample eay negative outside the relevant_clip_ids.\n \"\"\"\n # indices inside rel_clip_ids\n scores = np.array(scores) # (#rel_clips, 3)\n agg_scores = np.sum(scores, 1) # (#rel_clips, )\n sort_indices = np.argsort(agg_scores) # increasing\n\n # indices in the whole video\n # the min(_, ctx_l-1) here is incorrect, but should not cause\n # much troubles since this should be rarely used.\n hard_pos_clip_indices = [min(rel_clip_ids[idx], ctx_l-1) for idx in sort_indices[-max_n:]]\n hard_neg_clip_indices = [min(rel_clip_ids[idx], ctx_l-1) for idx in sort_indices[:max_n]]\n easy_pos_clip_indices = []\n easy_neg_clip_indices = []\n if add_easy_negative:\n easy_neg_pool = list(set(range(ctx_l)) - set(rel_clip_ids))\n if len(easy_neg_pool) >= max_n:\n easy_pos_clip_indices = random.sample(rel_clip_ids, k=max_n)\n easy_neg_clip_indices = random.sample(easy_neg_pool, k=max_n)\n else: # copy the hard ones\n easy_pos_clip_indices = hard_pos_clip_indices\n easy_neg_clip_indices = hard_neg_clip_indices\n\n pos_clip_indices = hard_pos_clip_indices + easy_pos_clip_indices\n neg_clip_indices = hard_neg_clip_indices + easy_neg_clip_indices\n return pos_clip_indices, neg_clip_indices\n\n def get_saliency_labels_all(self, rel_clip_ids, scores, ctx_l, max_n=1, add_easy_negative=True):\n \"\"\"Sum the scores from the three annotations, then take the two clips with the\n maximum scores as positive, and two with the minimum scores as negative.\n Args:\n rel_clip_ids: list(int), list of relevant clip ids\n scores: list([anno1_score, anno2_score, anno3_score]),\n ctx_l: int\n max_n: int, #clips to use as positive and negative, for easy and hard negative, respectively.\n add_easy_negative: bool, if True, sample eay negative outside the relevant_clip_ids.\n \"\"\"\n # indices inside rel_clip_ids\n scores = np.array(scores) # (#rel_clips, 3)\n agg_scores = np.sum(scores, 1) # (#rel_clips, )\n sort_indices = 
np.argsort(agg_scores) # increasing\n\n # score_array = [min(agg_scores[idx], ctx_l-1) for idx in range(ctx_l)]\n score_array = np.zeros(ctx_l)\n for idx in range(len(rel_clip_ids)):\n if rel_clip_ids[idx] >= ctx_l:\n score_array_new = np.zeros(ctx_l + 1)\n score_array_new[:ctx_l] = score_array\n score_array = score_array_new\n score_array[rel_clip_ids[idx]] = agg_scores[idx]\n\n # indices in the whole video\n # the min(_, ctx_l-1) here is incorrect, but should not cause\n # much troubles since this should be rarely used.\n hard_pos_clip_indices = [min(rel_clip_ids[idx], ctx_l-1) for idx in sort_indices[-max_n:]]\n hard_neg_clip_indices = [min(rel_clip_ids[idx], ctx_l-1) for idx in sort_indices[:max_n]]\n easy_pos_clip_indices = []\n easy_neg_clip_indices = []\n if add_easy_negative:\n easy_neg_pool = list(set(range(ctx_l)) - set(rel_clip_ids))\n if len(easy_neg_pool) >= max_n:\n easy_pos_clip_indices = random.sample(rel_clip_ids, k=max_n)\n easy_neg_clip_indices = random.sample(easy_neg_pool, k=max_n)\n else: # copy the hard ones\n easy_pos_clip_indices = hard_pos_clip_indices\n easy_neg_clip_indices = hard_neg_clip_indices\n\n pos_clip_indices = hard_pos_clip_indices + easy_pos_clip_indices\n neg_clip_indices = hard_neg_clip_indices + easy_neg_clip_indices\n return pos_clip_indices, neg_clip_indices, score_array\n\n def get_saliency_labels_all_tvsum(self, labels, ctx_l, max_n=1, add_easy_negative=False):\n \n agg_scores = np.sum(labels - np.ones_like(labels), axis=-1)[:ctx_l] # start from 1, so minus 1\n score_array = agg_scores / 80 * 12\n sort_indices = np.argsort(agg_scores) # increasing\n\n hard_pos_clip_indices = [min(idx, ctx_l-1) for idx in sort_indices[-max_n:]]\n hard_neg_clip_indices = [min(idx, ctx_l-1) for idx in sort_indices[:max_n]]\n easy_pos_clip_indices = []\n easy_neg_clip_indices = []\n if add_easy_negative:\n easy_neg_pool = list(set(range(ctx_l)))\n if len(easy_neg_pool) >= max_n:\n easy_pos_clip_indices = random.sample(rel_clip_ids, k=max_n)\n easy_neg_clip_indices = random.sample(easy_neg_pool, k=max_n)\n else: # copy the hard ones\n easy_pos_clip_indices = hard_pos_clip_indices\n easy_neg_clip_indices = hard_neg_clip_indices\n\n pos_clip_indices = hard_pos_clip_indices + easy_pos_clip_indices\n neg_clip_indices = hard_neg_clip_indices + easy_neg_clip_indices\n\n return pos_clip_indices, neg_clip_indices, score_array\n\n def get_saliency_labels_all_youtube(self, labels, ctx_l, max_n=1, add_easy_negative=False):\n \n # Youtube-hl only have binary score\n agg_scores = np.array(labels)[:, 0] # (L, 1) --> (L, )\n score_array = agg_scores * 1\n \n sort_indices = np.argsort(agg_scores) # increasing\n\n hard_pos_clip_indices = [min(idx, ctx_l-1) for idx in sort_indices[-max_n:]]\n hard_neg_clip_indices = [min(idx, ctx_l-1) for idx in sort_indices[:max_n]]\n easy_pos_clip_indices = []\n easy_neg_clip_indices = []\n if add_easy_negative:\n easy_neg_pool = list(set(range(ctx_l)))\n if len(easy_neg_pool) >= max_n:\n easy_pos_clip_indices = random.sample(rel_clip_ids, k=max_n)\n easy_neg_clip_indices = random.sample(easy_neg_pool, k=max_n)\n else: # copy the hard ones\n easy_pos_clip_indices = hard_pos_clip_indices\n easy_neg_clip_indices = hard_neg_clip_indices\n\n pos_clip_indices = hard_pos_clip_indices + easy_pos_clip_indices\n neg_clip_indices = hard_neg_clip_indices + easy_neg_clip_indices\n\n return pos_clip_indices, neg_clip_indices, score_array\n \n \n def get_span_labels(self, windows, ctx_l):\n \"\"\"\n windows: list([st, ed]) in seconds. E.g. 
[[26, 36]], corresponding st_ed clip_indices [[13, 17]] (inclusive)\n Note a maximum of `self.max_windows` windows are used.\n returns Tensor of shape (#windows, 2), each row is [center, width] normalized by video length\n \"\"\"\n if len(windows) > self.max_windows:\n random.shuffle(windows)\n windows = windows[:self.max_windows]\n if self.span_loss_type == \"l1\":\n windows = torch.Tensor(windows) / (ctx_l * self.clip_len) # normalized windows in xx\n windows = span_xx_to_cxw(windows) # normalized windows in cxw\n elif self.span_loss_type == \"ce\":\n windows = torch.Tensor([\n [int(w[0] / self.clip_len), min(int(w[1] / self.clip_len), ctx_l) - 1]\n for w in windows]).long() # inclusive\n else:\n raise NotImplementedError\n return windows\n\n def _get_query_feat_by_qid(self, qid):\n if self.dset_name == 'tvsum':\n q_feat = np.load(join(self.q_feat_dir, \"{}.npz\".format(qid))) # 'token', 'text'\n return torch.from_numpy(q_feat['token'])\n # youtube-hl\n elif self.dset_name == 'youtube_uni':\n q_feat = np.load(join(self.q_feat_dir, \"{}.npz\".format(qid)))\n return torch.from_numpy(q_feat['last_hidden_state'])\n \n elif self.dset_name in ['tacos', 'nlq']:\n q_feat_path = join(self.q_feat_dir, f\"{qid}.npz\")\n q_feat = np.load(q_feat_path)[self.q_feat_type].astype(np.float32)\n if self.q_feat_type == \"last_hidden_state\":\n q_feat = q_feat[:self.max_q_l]\n if self.normalize_t:\n q_feat = l2_normalize_np_array(q_feat)\n if self.txt_drop_ratio > 0:\n q_feat = self.random_drop_rows(q_feat)\n else:\n # QVhighlight dataset\n q_feat_path = join(self.q_feat_dir, f\"qid{qid}.npz\")\n q_feat = np.load(q_feat_path)[self.q_feat_type].astype(np.float32)\n if self.q_feat_type == \"last_hidden_state\":\n q_feat = q_feat[:self.max_q_l]\n if self.normalize_t:\n q_feat = l2_normalize_np_array(q_feat)\n if self.txt_drop_ratio > 0:\n q_feat = self.random_drop_rows(q_feat)\n return torch.from_numpy(q_feat) # (D, ) or (Lq, D)\n\n def random_drop_rows(self, embeddings):\n \"\"\"randomly mask num_drop rows in embeddings to be zero.\n Args:\n embeddings: np.ndarray (L, D)\n \"\"\"\n num_drop_rows = round(len(embeddings) * self.txt_drop_ratio)\n if num_drop_rows > 0:\n row_indices = np.random.choice(\n len(embeddings), size=num_drop_rows, replace=False)\n embeddings[row_indices] = 0\n return embeddings\n\n def _get_video_feat_by_vid(self, vid):\n if self.dset_name == 'tvsum':\n v_feat_list = []\n for _feat_dir in self.v_feat_dirs:\n _feat_path = join(_feat_dir, f\"{vid}_rgb.npy\")\n _feat_rgb = np.load(_feat_path)[:self.max_v_l].astype(np.float32)\n\n _feat_path = join(_feat_dir, f\"{vid}_opt.npy\")\n _feat_opt = np.load(_feat_path)[:self.max_v_l].astype(np.float32)\n \n _feat = np.concatenate([_feat_rgb, _feat_opt], axis=-1)\n # _feat = _feat_rgb\n if self.normalize_v:\n _feat = l2_normalize_np_array(_feat)\n v_feat_list.append(_feat)\n # some features are slightly longer than the others\n min_len = min([len(e) for e in v_feat_list])\n v_feat_list = [e[:min_len] for e in v_feat_list]\n v_feat = np.concatenate(v_feat_list, axis=1)\n\n elif self.dset_name == 'youtube_uni':\n v_feat_list = []\n for _feat_dir in self.v_feat_dirs:\n # Only single npz files per directory\n try:\n _feat_path = join(_feat_dir, f\"{vid}.npz\")\n _feat = np.load(_feat_path)[\"features\"][:self.max_v_l].astype(np.float32)\n except:\n _feat_path = join(_feat_dir, f\"{vid}.npy\")\n _feat = np.load(_feat_path)[:self.max_v_l].astype(np.float32)\n \n # _feat = _feat_rgb\n if self.normalize_v:\n _feat = l2_normalize_np_array(_feat)\n 
v_feat_list.append(_feat)\n # some features are slightly longer than the others\n min_len = min([len(e) for e in v_feat_list])\n v_feat_list = [e[:min_len] for e in v_feat_list] # TODO do we need to cut the length over the min_len?\n v_feat = np.concatenate(v_feat_list, axis=1)\n\n else:\n v_feat_list = []\n for _feat_dir in self.v_feat_dirs:\n try:\n _feat_path = join(_feat_dir, f\"{vid}.npz\")\n _feat = np.load(_feat_path)[\"features\"][:self.max_v_l].astype(np.float32)\n except:\n _feat_path = join(_feat_dir, f\"{vid}.npy\")\n _feat = np.load(_feat_path)[:self.max_v_l].astype(np.float32)\n if self.normalize_v:\n _feat = l2_normalize_np_array(_feat)\n v_feat_list.append(_feat)\n # some features are slightly longer than the others\n min_len = min([len(e) for e in v_feat_list])\n v_feat_list = [e[:min_len] for e in v_feat_list]\n v_feat = np.concatenate(v_feat_list, axis=1)\n return torch.from_numpy(v_feat) # (Lv, D)" }, { "identifier": "start_end_collate", "path": "cg_detr/start_end_dataset.py", "snippet": "def start_end_collate(batch):\n batch_meta = [e[\"meta\"] for e in batch] # seems no need to collate ?\n\n model_inputs_keys = batch[0][\"model_inputs\"].keys()\n batched_data = dict()\n for k in model_inputs_keys:\n if k == \"span_labels\":\n batched_data[k] = [dict(spans=e[\"model_inputs\"][\"span_labels\"]) for e in batch]\n continue\n if k in [\"saliency_pos_labels\", \"saliency_neg_labels\"]:\n batched_data[k] = torch.LongTensor([e[\"model_inputs\"][k] for e in batch])\n continue\n if k == \"saliency_all_labels\":\n pad_data, mask_data = pad_sequences_1d([e[\"model_inputs\"][k] for e in batch], dtype=np.float32, fixed_length=None)\n batched_data[k] = torch.tensor(pad_data, dtype=torch.float32)\n continue\n if k == 'qid':\n batched_data[k] = [e[\"model_inputs\"][k] for e in batch]\n continue\n if k == 'vid':\n batched_data[k] = [e[\"model_inputs\"][k] for e in batch]\n continue\n batched_data[k] = pad_sequences_1d(\n [e[\"model_inputs\"][k] for e in batch], dtype=torch.float32, fixed_length=None)\n return batch_meta, batched_data" }, { "identifier": "prepare_batch_inputs", "path": "cg_detr/start_end_dataset.py", "snippet": "def prepare_batch_inputs(batched_model_inputs, device, non_blocking=False):\n model_inputs = dict(\n src_txt=batched_model_inputs[\"query_feat\"][0].to(device, non_blocking=non_blocking),\n src_txt_mask=batched_model_inputs[\"query_feat\"][1].to(device, non_blocking=non_blocking),\n src_vid=batched_model_inputs[\"video_feat\"][0].to(device, non_blocking=non_blocking),\n src_vid_mask=batched_model_inputs[\"video_feat\"][1].to(device, non_blocking=non_blocking),\n vid=batched_model_inputs[\"vid\"],\n qid=batched_model_inputs[\"qid\"],\n )\n targets = {}\n\n if \"span_labels\" in batched_model_inputs:\n targets[\"span_labels\"] = [\n dict(spans=e[\"spans\"].to(device, non_blocking=non_blocking))\n for e in batched_model_inputs[\"span_labels\"]\n ]\n if \"saliency_pos_labels\" in batched_model_inputs:\n for name in [\"saliency_pos_labels\", \"saliency_neg_labels\"]:\n targets[name] = batched_model_inputs[name].to(device, non_blocking=non_blocking)\n\n if \"saliency_all_labels\" in batched_model_inputs:\n targets[\"saliency_all_labels\"] = batched_model_inputs[\"saliency_all_labels\"].to(device, non_blocking=non_blocking)\n targets[\"relevant_clips\"] = batched_model_inputs[\"saliency_all_labels\"].to(device, non_blocking=non_blocking)\n targets = None if len(targets) == 0 else targets\n return model_inputs, targets" }, { "identifier": "PostProcessorDETR", "path": 
"cg_detr/postprocessing_cg_detr.py", "snippet": "class PostProcessorDETR:\n def __init__(self, clip_length=2, min_ts_val=0, max_ts_val=150,\n min_w_l=2, max_w_l=70, move_window_method=\"center\",\n process_func_names=(\"clip_window_l\", \"clip_ts\", \"round_multiple\")):\n self.clip_length = clip_length\n self.min_ts_val = min_ts_val\n self.max_ts_val = max_ts_val\n self.min_w_l = min_w_l\n self.max_w_l = max_w_l\n self.move_window_method = move_window_method\n self.process_func_names = process_func_names\n self.name2func = dict(\n clip_ts=self.clip_min_max_timestamps,\n round_multiple=self.round_to_multiple_clip_lengths,\n clip_window_l=self.clip_window_lengths\n )\n\n def __call__(self, lines):\n processed_lines = []\n for line in tqdm(lines, desc=f\"convert to multiples of clip_length={self.clip_length}\"):\n windows_and_scores = torch.tensor(line[\"pred_relevant_windows\"])\n windows = windows_and_scores[:, :2]\n for func_name in self.process_func_names:\n windows = self.name2func[func_name](windows)\n line[\"pred_relevant_windows\"] = torch.cat(\n [windows, windows_and_scores[:, 2:3]], dim=1).tolist()\n line[\"pred_relevant_windows\"] = [e[:2] + [float(f\"{e[2]:.4f}\")] for e in line[\"pred_relevant_windows\"]]\n processed_lines.append(line)\n return processed_lines\n\n def clip_min_max_timestamps(self, windows):\n \"\"\"\n windows: (#windows, 2) torch.Tensor\n ensure timestamps for all windows is within [min_val, max_val], clip is out of boundaries.\n \"\"\"\n return torch.clamp(windows, min=self.min_ts_val, max=self.max_ts_val)\n\n def round_to_multiple_clip_lengths(self, windows):\n \"\"\"\n windows: (#windows, 2) torch.Tensor\n ensure the final window timestamps are multiples of `clip_length`\n \"\"\"\n return torch.round(windows / self.clip_length) * self.clip_length\n\n def clip_window_lengths(self, windows):\n \"\"\"\n windows: (#windows, 2) np.ndarray\n ensure the final window duration are within [self.min_w_l, self.max_w_l]\n \"\"\"\n window_lengths = windows[:, 1] - windows[:, 0]\n small_rows = window_lengths < self.min_w_l\n if torch.sum(small_rows) > 0:\n windows = self.move_windows(\n windows, small_rows, self.min_w_l, move_method=self.move_window_method)\n large_rows = window_lengths > self.max_w_l\n if torch.sum(large_rows) > 0:\n windows = self.move_windows(\n windows, large_rows, self.max_w_l, move_method=self.move_window_method)\n return windows\n\n @classmethod\n def move_windows(cls, windows, row_selector, new_length, move_method=\"left\"):\n \"\"\"\n Args:\n windows:\n row_selector:\n new_length:\n move_method: str,\n left: keep left unchanged\n center: keep center unchanged\n right: keep right unchanged\n\n Returns:\n\n \"\"\"\n # import ipdb;\n # ipdb.set_trace()\n if move_method == \"left\":\n windows[row_selector, 1] = windows[row_selector, 0] + new_length\n elif move_method == \"right\":\n windows[row_selector, 0] = windows[row_selector, 1] - new_length\n elif move_method == \"center\":\n center = (windows[row_selector, 1] + windows[row_selector, 0]) / 2.\n windows[row_selector, 0] = center - new_length / 2.\n windows[row_selector, 1] = center + new_length / 2.\n return windows" }, { "identifier": "eval_submission", "path": "standalone_eval/eval.py", "snippet": "def eval_submission(submission, ground_truth, verbose=True, match_number=True):\n \"\"\"\n Args:\n submission: list(dict), each dict is {\n qid: str,\n query: str,\n vid: str,\n pred_relevant_windows: list([st, ed]),\n pred_saliency_scores: list(float), len == #clips in video.\n i.e., each clip in the 
video will have a saliency score.\n }\n ground_truth: list(dict), each dict is {\n \"qid\": 7803,\n \"query\": \"Man in gray top walks from outside to inside.\",\n \"duration\": 150,\n \"vid\": \"RoripwjYFp8_360.0_510.0\",\n \"relevant_clip_ids\": [13, 14, 15, 16, 17]\n \"saliency_scores\": [[4, 4, 2], [3, 4, 2], [2, 2, 3], [2, 2, 2], [0, 1, 3]]\n each sublist corresponds to one clip in relevant_clip_ids.\n The 3 elements in the sublist are scores from 3 different workers. The\n scores are in [0, 1, 2, 3, 4], meaning [Very Bad, ..., Good, Very Good]\n }\n verbose:\n match_number:\n\n Returns:\n\n \"\"\"\n pred_qids = set([e[\"qid\"] for e in submission])\n gt_qids = set([e[\"qid\"] for e in ground_truth])\n if match_number:\n assert pred_qids == gt_qids, \\\n f\"qids in ground_truth and submission must match. \" \\\n f\"use `match_number=False` if you wish to disable this check\"\n else: # only leave the items that exists in both submission and ground_truth\n shared_qids = pred_qids.intersection(gt_qids)\n submission = [e for e in submission if e[\"qid\"] in shared_qids]\n ground_truth = [e for e in ground_truth if e[\"qid\"] in shared_qids]\n\n eval_metrics = {}\n eval_metrics_brief = OrderedDict()\n if \"pred_relevant_windows\" in submission[0]:\n moment_ret_scores = eval_moment_retrieval(\n submission, ground_truth, verbose=verbose)\n eval_metrics.update(moment_ret_scores)\n moment_ret_scores_brief = {\n \"MR-full-mAP\": moment_ret_scores[\"full\"][\"MR-mAP\"][\"average\"],\n \"[email protected]\": moment_ret_scores[\"full\"][\"MR-mAP\"][\"0.5\"],\n \"[email protected]\": moment_ret_scores[\"full\"][\"MR-mAP\"][\"0.75\"],\n \"MR-short-mAP\": moment_ret_scores[\"short\"][\"MR-mAP\"][\"average\"],\n \"MR-middle-mAP\": moment_ret_scores[\"middle\"][\"MR-mAP\"][\"average\"],\n \"MR-long-mAP\": moment_ret_scores[\"long\"][\"MR-mAP\"][\"average\"],\n \"MR-full-mIoU\": moment_ret_scores[\"full\"][\"MR-mIoU\"],\n \"[email protected]\": moment_ret_scores[\"full\"][\"MR-R1\"][\"0.3\"],\n \"[email protected]\": moment_ret_scores[\"full\"][\"MR-R1\"][\"0.5\"],\n \"[email protected]\": moment_ret_scores[\"full\"][\"MR-R1\"][\"0.7\"],\n }\n eval_metrics_brief.update(\n sorted([(k, v) for k, v in moment_ret_scores_brief.items()], key=lambda x: x[0]))\n\n if \"pred_saliency_scores\" in submission[0]:\n highlight_det_scores = eval_highlight(\n submission, ground_truth, verbose=verbose)\n eval_metrics.update(highlight_det_scores)\n highlight_det_scores_brief = dict([\n (f\"{k}-{sub_k.split('-')[1]}\", v[sub_k])\n for k, v in highlight_det_scores.items() for sub_k in v])\n eval_metrics_brief.update(highlight_det_scores_brief)\n\n # sort by keys\n final_eval_metrics = OrderedDict()\n final_eval_metrics[\"brief\"] = eval_metrics_brief\n final_eval_metrics.update(sorted([(k, v) for k, v in eval_metrics.items()], key=lambda x: x[0]))\n return final_eval_metrics" }, { "identifier": "save_jsonl", "path": "utils/basic_utils.py", "snippet": "def save_jsonl(data, filename):\n \"\"\"data is a list\"\"\"\n with open(filename, \"w\") as f:\n f.write(\"\\n\".join([json.dumps(e) for e in data]))" }, { "identifier": "save_json", "path": "utils/basic_utils.py", "snippet": "def save_json(data, filename, save_pretty=False, sort_keys=False):\n with open(filename, \"w\") as f:\n if save_pretty:\n f.write(json.dumps(data, indent=4, sort_keys=sort_keys))\n else:\n json.dump(data, f)" }, { "identifier": "temporal_nms", "path": "utils/temporal_nms.py", "snippet": "def temporal_nms(predictions, nms_thd, max_after_nms=100):\n 
\"\"\"\n Args:\n predictions: list(sublist), each sublist is [st (float), ed(float), score (float)],\n note larger scores are better and are preserved. For metrics that are better when smaller,\n please convert to its negative, e.g., convert distance to negative distance.\n nms_thd: float in [0, 1]\n max_after_nms:\n Returns:\n predictions_after_nms: list(sublist), each sublist is [st (float), ed(float), score (float)]\n References:\n https://github.com/wzmsltw/BSN-boundary-sensitive-network/blob/7b101fc5978802aa3c95ba5779eb54151c6173c6/Post_processing.py#L42\n \"\"\"\n if len(predictions) == 1: # only has one prediction, no need for nms\n return predictions\n\n predictions = sorted(predictions, key=lambda x: x[2], reverse=True) # descending order\n\n tstart = [e[0] for e in predictions]\n tend = [e[1] for e in predictions]\n tscore = [e[2] for e in predictions]\n rstart = []\n rend = []\n rscore = []\n while len(tstart) > 1 and len(rscore) < max_after_nms: # max 100 after nms\n idx = 1\n while idx < len(tstart): # compare with every prediction in the list.\n if compute_temporal_iou([tstart[0], tend[0]], [tstart[idx], tend[idx]]) > nms_thd:\n # rm highly overlapped lower score entries.\n tstart.pop(idx)\n tend.pop(idx)\n tscore.pop(idx)\n # print(\"--------------------------------\")\n # print(compute_temporal_iou([tstart[0], tend[0]], [tstart[idx], tend[idx]]))\n # print([tstart[0], tend[0]], [tstart[idx], tend[idx]])\n # print(tstart.pop(idx), tend.pop(idx), tscore.pop(idx))\n else:\n # move to next\n idx += 1\n rstart.append(tstart.pop(0))\n rend.append(tend.pop(0))\n rscore.append(tscore.pop(0))\n\n if len(rscore) < max_after_nms and len(tstart) >= 1: # add the last, possibly empty.\n rstart.append(tstart.pop(0))\n rend.append(tend.pop(0))\n rscore.append(tscore.pop(0))\n\n predictions_after_nms = [[st, ed, s] for s, st, ed in zip(rscore, rstart, rend)]\n return predictions_after_nms" } ]
import pprint
import numpy as np
import os
import torch
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import logging
from tqdm import tqdm, trange
from collections import OrderedDict, defaultdict
from utils.basic_utils import AverageMeter
from torch.utils.data import DataLoader
from cg_detr.config import TestOptions
from cg_detr.model import build_model
from cg_detr.span_utils import span_cxw_to_xx
from cg_detr.start_end_dataset import StartEndDataset, start_end_collate, prepare_batch_inputs
from cg_detr.postprocessing_cg_detr import PostProcessorDETR
from standalone_eval.eval import eval_submission
from utils.basic_utils import save_jsonl, save_json
from utils.temporal_nms import temporal_nms
from collections import OrderedDict
from sys import argv
11,379
logger = logging.getLogger(__name__) logging.basicConfig(format="%(asctime)s.%(msecs)03d:%(levelname)s:%(name)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO) def post_processing_mr_nms(mr_res, nms_thd, max_before_nms, max_after_nms): mr_res_after_nms = [] for e in mr_res: e["pred_relevant_windows"] = temporal_nms( e["pred_relevant_windows"][:max_before_nms], nms_thd=nms_thd, max_after_nms=max_after_nms ) mr_res_after_nms.append(e) return mr_res_after_nms def eval_epoch_post_processing(submission, opt, gt_data, save_submission_filename): # IOU_THDS = (0.5, 0.7) logger.info("Saving/Evaluating before nms results") submission_path = os.path.join(opt.results_dir, save_submission_filename) save_jsonl(submission, submission_path) if opt.eval_split_name in ["val"]: # since test_public has no GT metrics = eval_submission( submission, gt_data, verbose=opt.debug, match_number=not opt.debug ) save_metrics_path = submission_path.replace(".jsonl", "_metrics.json") save_json(metrics, save_metrics_path, save_pretty=True, sort_keys=False) latest_file_paths = [submission_path, save_metrics_path] else: metrics = None latest_file_paths = [submission_path, ] if opt.nms_thd != -1: logger.info("[MR] Performing nms with nms_thd {}".format(opt.nms_thd)) submission_after_nms = post_processing_mr_nms( submission, nms_thd=opt.nms_thd, max_before_nms=opt.max_before_nms, max_after_nms=opt.max_after_nms ) logger.info("Saving/Evaluating nms results") submission_nms_path = submission_path.replace(".jsonl", "_nms_thd_{}.jsonl".format(opt.nms_thd)) save_jsonl(submission_after_nms, submission_nms_path) if opt.eval_split_name == "val": metrics_nms = eval_submission( submission_after_nms, gt_data, verbose=opt.debug, match_number=not opt.debug ) save_metrics_nms_path = submission_nms_path.replace(".jsonl", "_metrics.json") save_json(metrics_nms, save_metrics_nms_path, save_pretty=True, sort_keys=False) latest_file_paths += [submission_nms_path, save_metrics_nms_path] else: metrics_nms = None latest_file_paths = [submission_nms_path, ] else: metrics_nms = None return metrics, metrics_nms, latest_file_paths # for HL @torch.no_grad() def compute_hl_results(model, eval_loader, opt, epoch_i=None, criterion=None, tb_writer=None): model.eval() if criterion: assert eval_loader.dataset.load_labels criterion.eval() loss_meters = defaultdict(AverageMeter) write_tb = tb_writer is not None and epoch_i is not None mr_res = [] topk = 5 # top-5 map video_ap_collected = [] for batch in tqdm(eval_loader, desc="compute st ed scores"): query_meta = batch[0]
logger = logging.getLogger(__name__) logging.basicConfig(format="%(asctime)s.%(msecs)03d:%(levelname)s:%(name)s - %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=logging.INFO) def post_processing_mr_nms(mr_res, nms_thd, max_before_nms, max_after_nms): mr_res_after_nms = [] for e in mr_res: e["pred_relevant_windows"] = temporal_nms( e["pred_relevant_windows"][:max_before_nms], nms_thd=nms_thd, max_after_nms=max_after_nms ) mr_res_after_nms.append(e) return mr_res_after_nms def eval_epoch_post_processing(submission, opt, gt_data, save_submission_filename): # IOU_THDS = (0.5, 0.7) logger.info("Saving/Evaluating before nms results") submission_path = os.path.join(opt.results_dir, save_submission_filename) save_jsonl(submission, submission_path) if opt.eval_split_name in ["val"]: # since test_public has no GT metrics = eval_submission( submission, gt_data, verbose=opt.debug, match_number=not opt.debug ) save_metrics_path = submission_path.replace(".jsonl", "_metrics.json") save_json(metrics, save_metrics_path, save_pretty=True, sort_keys=False) latest_file_paths = [submission_path, save_metrics_path] else: metrics = None latest_file_paths = [submission_path, ] if opt.nms_thd != -1: logger.info("[MR] Performing nms with nms_thd {}".format(opt.nms_thd)) submission_after_nms = post_processing_mr_nms( submission, nms_thd=opt.nms_thd, max_before_nms=opt.max_before_nms, max_after_nms=opt.max_after_nms ) logger.info("Saving/Evaluating nms results") submission_nms_path = submission_path.replace(".jsonl", "_nms_thd_{}.jsonl".format(opt.nms_thd)) save_jsonl(submission_after_nms, submission_nms_path) if opt.eval_split_name == "val": metrics_nms = eval_submission( submission_after_nms, gt_data, verbose=opt.debug, match_number=not opt.debug ) save_metrics_nms_path = submission_nms_path.replace(".jsonl", "_metrics.json") save_json(metrics_nms, save_metrics_nms_path, save_pretty=True, sort_keys=False) latest_file_paths += [submission_nms_path, save_metrics_nms_path] else: metrics_nms = None latest_file_paths = [submission_nms_path, ] else: metrics_nms = None return metrics, metrics_nms, latest_file_paths # for HL @torch.no_grad() def compute_hl_results(model, eval_loader, opt, epoch_i=None, criterion=None, tb_writer=None): model.eval() if criterion: assert eval_loader.dataset.load_labels criterion.eval() loss_meters = defaultdict(AverageMeter) write_tb = tb_writer is not None and epoch_i is not None mr_res = [] topk = 5 # top-5 map video_ap_collected = [] for batch in tqdm(eval_loader, desc="compute st ed scores"): query_meta = batch[0]
model_inputs, targets = prepare_batch_inputs(batch[1], opt.device, non_blocking=opt.pin_memory)
6
2023-11-10 12:45:25+00:00
16k
zhang-tao-whu/DVIS_Plus
ov_dvis/meta_architecture_ov.py
[ { "identifier": "VideoSetCriterion_ov", "path": "mask2former_video/modeling/criterion.py", "snippet": "class VideoSetCriterion_ov(VideoSetCriterion):\n \"\"\"This class computes the loss for DETR.\n The process happens in two steps:\n 1) we compute hungarian assignment between ground truth boxes and the outputs of the model\n 2) we supervise each pair of matched ground-truth / prediction (supervise class and box)\n \"\"\"\n\n def __init__(self, num_classes, matcher, weight_dict, eos_coef, losses,\n num_points, oversample_ratio, importance_sample_ratio, frames=2):\n \"\"\"Create the criterion.\n Parameters:\n num_classes: number of object categories, omitting the special no-object category\n matcher: module able to compute a matching between targets and proposals\n weight_dict: dict containing as key the names of the losses and as values their relative weight.\n eos_coef: relative classification weight applied to the no-object category\n losses: list of all the losses to be applied. See get_loss for list of available losses.\n \"\"\"\n super(VideoSetCriterion, self).__init__()\n self.num_classes = num_classes\n self.matcher = matcher\n self.weight_dict = weight_dict\n self.eos_coef = eos_coef\n self.losses = losses\n\n # pointwise mask loss parameters\n self.num_points = num_points\n self.oversample_ratio = oversample_ratio\n self.importance_sample_ratio = importance_sample_ratio\n self.frames = frames\n\n def loss_labels(self, outputs, targets, indices, num_masks):\n \"\"\"Classification loss (NLL)\n targets dicts must contain the key \"labels\" containing a tensor of dim [nb_target_boxes]\n \"\"\"\n assert \"pred_logits\" in outputs\n src_logits = outputs[\"pred_logits\"].float()\n\n idx = self._get_src_permutation_idx(indices)\n target_classes_o = torch.cat([t[\"labels\"][J] for t, (_, J) in zip(targets, indices)])\n target_classes = torch.full(\n src_logits.shape[:2], src_logits.shape[2] - 1, dtype=torch.int64, device=src_logits.device\n )\n target_classes[idx] = target_classes_o.to(target_classes)\n\n empty_weight = torch.ones(src_logits.shape[2])\n empty_weight[-1] = self.eos_coef\n empty_weight = empty_weight.to(src_logits)\n\n loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, empty_weight)\n losses = {\"loss_ce\": loss_ce}\n return losses" }, { "identifier": "VideoHungarianMatcher", "path": "mask2former_video/modeling/matcher.py", "snippet": "class VideoHungarianMatcher(nn.Module):\n \"\"\"This class computes an assignment between the targets and the predictions of the network\n\n For efficiency reasons, the targets don't include the no_object. Because of this, in general,\n there are more predictions than targets. 
In this case, we do a 1-to-1 matching of the best predictions,\n while the others are un-matched (and thus treated as non-objects).\n \"\"\"\n\n def __init__(self, cost_class: float = 1, cost_mask: float = 1, cost_dice: float = 1, num_points: int = 0):\n \"\"\"Creates the matcher\n\n Params:\n cost_class: This is the relative weight of the classification error in the matching cost\n cost_mask: This is the relative weight of the focal loss of the binary mask in the matching cost\n cost_dice: This is the relative weight of the dice loss of the binary mask in the matching cost\n \"\"\"\n super().__init__()\n self.cost_class = cost_class\n self.cost_mask = cost_mask\n self.cost_dice = cost_dice\n\n assert cost_class != 0 or cost_mask != 0 or cost_dice != 0, \"all costs cant be 0\"\n\n self.num_points = num_points\n\n @torch.no_grad()\n def memory_efficient_forward(self, outputs, targets):\n \"\"\"More memory-friendly matching\"\"\"\n bs, num_queries = outputs[\"pred_logits\"].shape[:2]\n\n indices = []\n\n # Iterate through batch size\n for b in range(bs):\n\n out_prob = outputs[\"pred_logits\"][b].softmax(-1) # [num_queries, num_classes]\n tgt_ids = targets[b][\"labels\"].to(torch.int64)\n\n\n # Compute the classification cost. Contrary to the loss, we don't use the NLL,\n # but approximate it in 1 - proba[target class].\n # The 1 is a constant that doesn't change the matching, it can be ommitted.\n try:\n cost_class = -out_prob[:, tgt_ids]\n except:\n cost_class = 0.0\n print(tgt_ids)\n\n out_mask = outputs[\"pred_masks\"][b] # [num_queries, T, H_pred, W_pred]\n # gt masks are already padded when preparing target\n tgt_mask = targets[b][\"masks\"].to(out_mask) # [num_gts, T, H_pred, W_pred]\n\n # out_mask = out_mask[:, None]\n # tgt_mask = tgt_mask[:, None]\n # all masks share the same set of points for efficient matching!\n point_coords = torch.rand(1, self.num_points, 2, device=out_mask.device)\n # get gt labels\n tgt_mask = point_sample(\n tgt_mask,\n point_coords.repeat(tgt_mask.shape[0], 1, 1).to(tgt_mask),\n align_corners=False,\n ).flatten(1)\n\n out_mask = point_sample(\n out_mask,\n point_coords.repeat(out_mask.shape[0], 1, 1).to(out_mask),\n align_corners=False,\n ).flatten(1)\n\n with autocast(enabled=False):\n out_mask = out_mask.float()\n tgt_mask = tgt_mask.float()\n # Compute the focal loss between masks\n cost_mask = batch_sigmoid_ce_loss_jit(out_mask, tgt_mask)\n\n # Compute the dice loss betwen masks\n cost_dice = batch_dice_loss_jit(out_mask, tgt_mask)\n # Final cost matrix\n C = (\n self.cost_mask * cost_mask\n + self.cost_class * cost_class\n + self.cost_dice * cost_dice\n )\n C = C.reshape(num_queries, -1).cpu()\n\n indices.append(linear_sum_assignment(C))\n\n return [\n (torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64))\n for i, j in indices\n ]\n # [per image indicates], per image indicates -> (pred inds, gt inds)\n\n @torch.no_grad()\n def forward(self, outputs, targets):\n \"\"\"Performs the matching\n\n Params:\n outputs: This is a dict that contains at least these entries:\n \"pred_logits\": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits\n \"pred_masks\": Tensor of dim [batch_size, num_queries, H_pred, W_pred] with the predicted masks\n\n targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing:\n \"labels\": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth\n objects in the target) containing the class labels\n 
\"masks\": Tensor of dim [num_target_boxes, H_gt, W_gt] containing the target masks\n\n Returns:\n A list of size batch_size, containing tuples of (index_i, index_j) where:\n - index_i is the indices of the selected predictions (in order)\n - index_j is the indices of the corresponding selected targets (in order)\n For each batch element, it holds:\n len(index_i) = len(index_j) = min(num_queries, num_target_boxes)\n \"\"\"\n return self.memory_efficient_forward(outputs, targets)\n\n def __repr__(self, _repr_indent=4):\n head = \"Matcher \" + self.__class__.__name__\n body = [\n \"cost_class: {}\".format(self.cost_class),\n \"cost_mask: {}\".format(self.cost_mask),\n \"cost_dice: {}\".format(self.cost_dice),\n ]\n lines = [head] + [\" \" * _repr_indent + line for line in body]\n return \"\\n\".join(lines)" }, { "identifier": "VideoHungarianMatcher_Consistent", "path": "mask2former_video/modeling/matcher.py", "snippet": "class VideoHungarianMatcher_Consistent(VideoHungarianMatcher):\n \"\"\"\n Only match in the first frame where the object appears in the GT.\n \"\"\"\n def __init__(self, cost_class: float = 1, cost_mask: float = 1,\n cost_dice: float = 1, num_points: int = 0,\n frames: int = 5):\n super().__init__(\n cost_class=cost_class, cost_mask=cost_mask,\n cost_dice=cost_dice, num_points=num_points,\n )\n self.frames = frames\n\n @torch.no_grad()\n def memory_efficient_forward(self, outputs, targets):\n \"\"\"More memory-friendly matching\"\"\"\n bs, num_queries = outputs[\"pred_logits\"].shape[:2]\n indices = []\n\n # Iterate through batch size\n for b in range(bs // self.frames):\n # find the fist frame where the object appears\n id_apper_frame = {}\n for f in range(self.frames):\n overall_bs = b * self.frames + f\n instance_ids = targets[overall_bs][\"ids\"]\n valid = torch.nonzero(instance_ids.squeeze(1) != -1)\n for v in valid:\n v = v.item()\n if v not in id_apper_frame.keys():\n id_apper_frame[v] = f\n\n # obtain the object ID that first appears in each frame\n apper_frame_id = {}\n for id in id_apper_frame.keys():\n f = id_apper_frame[id]\n if f in apper_frame_id:\n apper_frame_id[f].append(id)\n else:\n apper_frame_id[f] = [id]\n need_match_frames = list(apper_frame_id.keys())\n need_match_frames.sort()\n\n # per frame match\n used_query_idx = []\n matched_indices = [[], []]\n for f in need_match_frames:\n overall_bs = b * self.frames + f\n used_tgt = apper_frame_id[f]\n out_prob = outputs[\"pred_logits\"][overall_bs].softmax(-1) # [num_queries, num_classes]\n tgt_ids = targets[overall_bs][\"labels\"][used_tgt]\n\n # Compute the classification cost. 
Contrary to the loss, we don't use the NLL,\n # but approximate it in 1 - proba[target class].\n # The 1 is a constant that doesn't change the matching, it can be ommitted.\n cost_class = -out_prob[:, tgt_ids]\n\n out_mask = outputs[\"pred_masks\"][overall_bs] # [num_queries, T, H_pred, W_pred]\n # gt masks are already padded when preparing target\n tgt_mask = targets[overall_bs][\"masks\"][used_tgt].to(out_mask) # [num_gts, T, H_pred, W_pred]\n\n # all masks share the same set of points for efficient matching!\n point_coords = torch.rand(1, self.num_points, 2, device=out_mask.device)\n # get gt labels\n tgt_mask = point_sample(\n tgt_mask,\n point_coords.repeat(tgt_mask.shape[0], 1, 1).to(tgt_mask),\n align_corners=False,\n ).flatten(1)\n\n out_mask = point_sample(\n out_mask,\n point_coords.repeat(out_mask.shape[0], 1, 1).to(out_mask),\n align_corners=False,\n ).flatten(1)\n\n with autocast(enabled=False):\n out_mask = out_mask.float()\n tgt_mask = tgt_mask.float()\n # Compute the focal loss between masks\n cost_mask = batch_sigmoid_ce_loss_jit(out_mask, tgt_mask)\n\n # Compute the dice loss betwen masks\n cost_dice = batch_dice_loss_jit(out_mask, tgt_mask)\n\n # Final cost matrix\n C = (\n self.cost_mask * cost_mask\n + self.cost_class * cost_class\n + self.cost_dice * cost_dice\n )\n C = C.reshape(num_queries, -1).cpu()\n if len(used_query_idx) != 0:\n C[used_query_idx, :] = 1e6\n indice1, indice2 = linear_sum_assignment(C)\n\n used_query_idx += list(indice1)\n\n indice2 = np.array(used_tgt)[indice2]\n matched_indices[0] += list(indice1)\n matched_indices[1] += list(indice2)\n indices += [matched_indices] * self.frames\n return [\n (torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64))\n for i, j in indices\n ]" }, { "identifier": "retry_if_cuda_oom", "path": "mask2former_video/utils/memory.py", "snippet": "def retry_if_cuda_oom(func):\n \"\"\"\n Makes a function retry itself after encountering\n pytorch's CUDA OOM error.\n It will first retry after calling `torch.cuda.empty_cache()`.\n If that still fails, it will then retry by trying to convert inputs to CPUs.\n In this case, it expects the function to dispatch to CPU implementation.\n The return values may become CPU tensors as well and it's user's\n responsibility to convert it back to CUDA tensor if needed.\n Args:\n func: a stateless callable that takes tensor-like objects as arguments\n Returns:\n a callable which retries `func` if OOM is encountered.\n Examples:\n ::\n output = retry_if_cuda_oom(some_torch_function)(input1, input2)\n # output may be on CPU even if inputs are on GPU\n Note:\n 1. When converting inputs to CPU, it will only look at each argument and check\n if it has `.device` and `.to` for conversion. Nested structures of tensors\n are not supported.\n 2. Since the function might be called more than once, it has to be\n stateless.\n \"\"\"\n\n def maybe_to_cpu(x):\n try:\n like_gpu_tensor = x.device.type == \"cuda\" and hasattr(x, \"to\")\n except AttributeError:\n like_gpu_tensor = False\n if like_gpu_tensor:\n return x.to(device=\"cpu\").to(torch.float32)\n else:\n return x\n\n @wraps(func)\n def wrapped(*args, **kwargs):\n with _ignore_torch_cuda_oom():\n return func(*args, **kwargs)\n\n # Clear cache and retry\n torch.cuda.empty_cache()\n with _ignore_torch_cuda_oom():\n return func(*args, **kwargs)\n\n # Try on CPU. 
This slows down the code significantly, therefore print a notice.\n logger = logging.getLogger(__name__)\n logger.info(\"Attempting to copy inputs to CPU due to CUDA OOM\")\n new_args = (maybe_to_cpu(x) for x in args)\n new_kwargs = {k: maybe_to_cpu(v) for k, v in kwargs.items()}\n with autocast(enabled=False):\n return func(*new_args, **new_kwargs)\n\n return wrapped" }, { "identifier": "ReferringTracker_noiser_OV", "path": "ov_dvis/video_dvis_modules_ov.py", "snippet": "class ReferringTracker_noiser_OV(torch.nn.Module):\n def __init__(\n self,\n hidden_channel=256,\n feedforward_channel=2048,\n num_head=8,\n decoder_layer_num=6,\n mask_dim=256,\n noise_mode='hard',\n # frozen fc-clip head\n mask_pooling=None,\n mask_pooling_proj=None,\n class_embed=None,\n logit_scale=None,\n mask_embed=None,\n decoder_norm=None,\n ):\n super(ReferringTracker_noiser_OV, self).__init__()\n\n # init transformer layers\n self.num_heads = num_head\n self.num_layers = decoder_layer_num\n self.transformer_self_attention_layers = nn.ModuleList()\n self.transformer_cross_attention_layers = nn.ModuleList()\n self.transformer_ffn_layers = nn.ModuleList()\n\n for _ in range(self.num_layers):\n self.transformer_self_attention_layers.append(\n SelfAttentionLayer(\n d_model=hidden_channel,\n nhead=num_head,\n dropout=0.0,\n normalize_before=False,\n )\n )\n\n self.transformer_cross_attention_layers.append(\n ReferringCrossAttentionLayer(\n d_model=hidden_channel,\n nhead=num_head,\n dropout=0.0,\n normalize_before=False,\n )\n )\n\n self.transformer_ffn_layers.append(\n FFNLayer(\n d_model=hidden_channel,\n dim_feedforward=feedforward_channel,\n dropout=0.0,\n normalize_before=False,\n )\n )\n\n # for cl learning\n self.ref_proj = MLP(hidden_channel, hidden_channel, hidden_channel, 3)\n # for reference and query merge\n self.merge = nn.Linear(hidden_channel * 2, hidden_channel)\n\n # record previous frame information\n self.last_outputs = None\n self.last_frame_embeds = None\n self.last_reference = None\n\n self.noiser = Noiser(noise_ratio=0.5, mode=noise_mode)\n\n # FC-CLIP\n self.mask_pooling = mask_pooling\n self._mask_pooling_proj = mask_pooling_proj\n self.class_embed = class_embed\n self.logit_scale = logit_scale\n self.mask_embed = mask_embed\n self.decoder_norm = decoder_norm\n\n def _clear_memory(self):\n del self.last_outputs\n self.last_outputs = None\n self.last_reference = None\n return\n\n def forward(self, frame_embeds, mask_features, resume=False,\n return_indices=False, frame_classes=None,\n frame_embeds_no_norm=None, cur_feature=None,\n text_classifier=None, num_templates=None,\n ):\n \"\"\"\n :param frame_embeds: the instance queries output by the segmenter\n :param mask_features: the mask features output by the segmenter\n :param resume: whether the first frame is the start of the video\n :param return_indices: whether return the match indices\n :return: output dict, including masks, classes, embeds.\n \"\"\"\n\n frame_embeds = frame_embeds.permute(2, 3, 0, 1) # t, q, b, c\n if frame_embeds_no_norm is not None:\n frame_embeds_no_norm = frame_embeds_no_norm.permute(2, 3, 0, 1) # t, q, b, c\n n_frame, n_q, bs, _ = frame_embeds.size()\n outputs = []\n ret_indices = []\n\n all_frames_references = []\n\n for i in range(n_frame):\n ms_output = []\n single_frame_embeds = frame_embeds[i] # q b c\n if frame_embeds_no_norm is not None:\n single_frame_embeds_no_norm = frame_embeds_no_norm[i]\n else:\n single_frame_embeds_no_norm = single_frame_embeds\n if frame_classes is None:\n single_frame_classes = None\n 
else:\n single_frame_classes = frame_classes[i]\n\n frame_key = single_frame_embeds_no_norm\n\n # the first frame of a video\n if i == 0 and resume is False:\n self._clear_memory()\n for j in range(self.num_layers):\n if j == 0:\n indices, noised_init = self.noiser(\n single_frame_embeds,\n single_frame_embeds,\n cur_embeds_no_norm=single_frame_embeds_no_norm,\n activate=False,\n cur_classes=single_frame_classes,\n )\n ms_output.append(single_frame_embeds_no_norm[indices])\n self.last_frame_embeds = single_frame_embeds[indices]\n ret_indices.append(indices)\n output = self.transformer_cross_attention_layers[j](\n noised_init, self.ref_proj(frame_key),\n frame_key, single_frame_embeds_no_norm,\n memory_mask=None,\n memory_key_padding_mask=None,\n pos=None, query_pos=None\n )\n\n output = self.transformer_self_attention_layers[j](\n output, tgt_mask=None,\n tgt_key_padding_mask=None,\n query_pos=None\n )\n # FFN\n output = self.transformer_ffn_layers[j](\n output\n )\n ms_output.append(output)\n else:\n output = self.transformer_cross_attention_layers[j](\n ms_output[-1], self.ref_proj(ms_output[-1]),\n frame_key, single_frame_embeds_no_norm,\n memory_mask=None,\n memory_key_padding_mask=None,\n pos=None, query_pos=None\n )\n\n output = self.transformer_self_attention_layers[j](\n output, tgt_mask=None,\n tgt_key_padding_mask=None,\n query_pos=None\n )\n # FFN\n output = self.transformer_ffn_layers[j](\n output\n )\n ms_output.append(output)\n self.last_reference = self.ref_proj(frame_key)\n else:\n reference = self.ref_proj(self.last_outputs[-1])\n self.last_reference = reference\n\n for j in range(self.num_layers):\n if j == 0:\n indices, noised_init = self.noiser(\n self.last_frame_embeds,\n single_frame_embeds,\n cur_embeds_no_norm=single_frame_embeds_no_norm,\n activate=self.training,\n cur_classes=single_frame_classes,\n )\n ms_output.append(single_frame_embeds_no_norm[indices])\n self.last_frame_embeds = single_frame_embeds[indices]\n ret_indices.append(indices)\n output = self.transformer_cross_attention_layers[j](\n noised_init, reference, frame_key,\n single_frame_embeds_no_norm,\n memory_mask=None,\n memory_key_padding_mask=None,\n pos=None, query_pos=None\n )\n\n output = self.transformer_self_attention_layers[j](\n output, tgt_mask=None,\n tgt_key_padding_mask=None,\n query_pos=None\n )\n # FFN\n output = self.transformer_ffn_layers[j](\n output\n )\n ms_output.append(output)\n else:\n output = self.transformer_cross_attention_layers[j](\n ms_output[-1], reference, frame_key,\n single_frame_embeds_no_norm,\n memory_mask=None,\n memory_key_padding_mask=None,\n pos=None, query_pos=None\n )\n\n output = self.transformer_self_attention_layers[j](\n output, tgt_mask=None,\n tgt_key_padding_mask=None,\n query_pos=None\n )\n # FFN\n output = self.transformer_ffn_layers[j](\n output\n )\n ms_output.append(output)\n\n all_frames_references.append(self.last_reference)\n\n ms_output = torch.stack(ms_output, dim=0) # (1 + layers, q, b, c)\n self.last_outputs = ms_output\n outputs.append(ms_output[1:])\n outputs = torch.stack(outputs, dim=0) # (t, l, q, b, c)\n all_frames_references = torch.stack(all_frames_references, dim=0) # (t, q, b, c)\n\n mask_features_ = mask_features\n if not self.training:\n outputs = outputs[:, -1:]\n del mask_features\n outputs_class, outputs_masks = self.prediction(outputs, mask_features_, text_classifier,\n num_templates, all_frames_references)\n out = {\n 'pred_logits': outputs_class[-1].transpose(1, 2), # (b, t, q, c)\n 'pred_masks': outputs_masks[-1], # (b, 
q, t, h, w)\n 'aux_outputs': self._set_aux_loss(\n outputs_class, outputs_masks\n ),\n 'pred_embds': outputs[:, -1].permute(2, 3, 0, 1), # (b, c, t, q)\n 'pred_references': all_frames_references.permute(2, 3, 0, 1), # (b, c, t, q),\n }\n if return_indices:\n return out, ret_indices\n else:\n return out\n\n @torch.jit.unused\n def _set_aux_loss(self, outputs_class, outputs_seg_masks):\n # this is a workaround to make torchscript happy, as torchscript\n # doesn't support dictionary with non-homogeneous values, such\n # as a dict having both a Tensor and a list.\n return [{\"pred_logits\": a.transpose(1, 2), \"pred_masks\": b}\n for a, b in zip(outputs_class[:-1], outputs_seg_masks[:-1])\n ]\n\n def prediction(self, outputs, mask_features, text_classifier, num_templates, references):\n # outputs (t, l, q, b, c)\n # mask_features (b, t, c, h, w)\n decoder_output = self.decoder_norm(outputs)\n decoder_output = decoder_output.permute(1, 3, 0, 2, 4) # (l, b, t, q, c)\n mask_embed = self.mask_embed(decoder_output)\n outputs_mask = torch.einsum(\"lbtqc,btchw->lbqthw\", mask_embed, mask_features)\n\n references = references.unsqueeze(1).repeat(1, decoder_output.size(0), 1, 1, 1).permute(1, 3, 0, 2, 4)\n decoder_output_cls = torch.cat([references, decoder_output], dim=-1)\n decoder_output_cls = self.merge(decoder_output_cls)\n\n # fc-clip class head forward\n # mean pooling\n b, t, c, _, _ = mask_features.shape\n l, b, q, t, _, _ = outputs_mask.shape\n mask_features = mask_features.unsqueeze(0).repeat(l, 1, 1, 1, 1, 1).flatten(0, 2) # lbt, c, h, w\n outputs_mask_ = outputs_mask.permute(0, 1, 3, 2, 4, 5).flatten(0, 2) # (lbt, q, h, w)\n maskpool_embeddings = self.mask_pooling(x=mask_features, mask=outputs_mask_) # [lbt, q, c]\n maskpool_embeddings = maskpool_embeddings.reshape(l, b, t, *maskpool_embeddings.shape[-2:]) # (l b t q c)\n maskpool_embeddings = self._mask_pooling_proj(maskpool_embeddings)\n class_embed = self.class_embed(maskpool_embeddings + decoder_output_cls)\n outputs_class = get_classification_logits(class_embed, text_classifier, self.logit_scale, num_templates)\n outputs_class = outputs_class.transpose(2, 3) # (l, b, q, t, cls+1)\n\n return outputs_class, outputs_mask" }, { "identifier": "TemporalRefiner_OV", "path": "ov_dvis/video_dvis_modules_ov.py", "snippet": "class TemporalRefiner_OV(torch.nn.Module):\n def __init__(\n self,\n hidden_channel=256,\n feedforward_channel=2048,\n num_head=8,\n decoder_layer_num=6,\n mask_dim=256,\n class_num=25,\n windows=5,\n # resume segmenter prediction head\n mask_pooling=None,\n mask_pooling_proj=None,\n class_embed=None,\n logit_scale=None,\n mask_embed=None,\n decoder_norm=None,\n ):\n super(TemporalRefiner_OV, self).__init__()\n\n self.windows = windows\n\n # init transformer layers\n self.num_heads = num_head\n self.num_layers = decoder_layer_num\n self.transformer_obj_self_attention_layers = nn.ModuleList()\n self.transformer_time_self_attention_layers = nn.ModuleList()\n self.transformer_cross_attention_layers = nn.ModuleList()\n self.transformer_ffn_layers = nn.ModuleList()\n\n self.conv_short_aggregate_layers = nn.ModuleList()\n self.conv_norms = nn.ModuleList()\n\n for _ in range(self.num_layers):\n self.transformer_time_self_attention_layers.append(\n SelfAttentionLayer(\n d_model=hidden_channel,\n nhead=num_head,\n dropout=0.0,\n normalize_before=False,\n )\n )\n\n self.conv_short_aggregate_layers.append(\n nn.Sequential(\n nn.Conv1d(hidden_channel, hidden_channel,\n kernel_size=5, stride=1,\n padding='same', 
padding_mode='replicate'),\n nn.ReLU(inplace=True),\n nn.Conv1d(hidden_channel, hidden_channel,\n kernel_size=3, stride=1,\n padding='same', padding_mode='replicate'),\n )\n )\n\n self.conv_norms.append(nn.LayerNorm(hidden_channel))\n\n self.transformer_obj_self_attention_layers.append(\n SelfAttentionLayer(\n d_model=hidden_channel,\n nhead=num_head,\n dropout=0.0,\n normalize_before=False,\n )\n )\n\n self.transformer_cross_attention_layers.append(\n CrossAttentionLayer(\n d_model=hidden_channel,\n nhead=num_head,\n dropout=0.0,\n normalize_before=False,\n )\n )\n\n self.transformer_ffn_layers.append(\n FFNLayer(\n d_model=hidden_channel,\n dim_feedforward=feedforward_channel,\n dropout=0.0,\n normalize_before=False,\n )\n )\n\n self.decoder_norm = nn.LayerNorm(hidden_channel)\n\n # FC-CLIP\n self.mask_pooling = mask_pooling\n self._mask_pooling_proj = mask_pooling_proj\n self.class_embed = class_embed\n self.logit_scale = logit_scale\n self.mask_embed = mask_embed\n self.decoder_norm = decoder_norm\n\n self.activation_proj = nn.Linear(hidden_channel, 1)\n\n def forward(self, instance_embeds, frame_embeds, mask_features,\n text_classifier=None, num_templates=None,):\n \"\"\"\n :param instance_embeds: the aligned instance queries output by the tracker, shape is (b, c, t, q)\n :param frame_embeds: the instance queries processed by the tracker.frame_forward function, shape is (b, c, t, q)\n :param mask_features: the mask features output by the segmenter, shape is (b, t, c, h, w)\n :return: output dict, including masks, classes, embeds.\n \"\"\"\n n_batch, n_channel, n_frames, n_instance = instance_embeds.size()\n\n outputs = []\n output = instance_embeds\n frame_embeds = frame_embeds.permute(3, 0, 2, 1).flatten(1, 2)\n\n for i in range(self.num_layers):\n output = output.permute(2, 0, 3, 1) # (t, b, q, c)\n output = output.flatten(1, 2) # (t, bq, c)\n\n # do long temporal attention\n output = self.transformer_time_self_attention_layers[i](\n output, tgt_mask=None,\n tgt_key_padding_mask=None,\n query_pos=None\n )\n\n # do short temporal conv\n output = output.permute(1, 2, 0) # (bq, c, t)\n output = self.conv_norms[i](\n (self.conv_short_aggregate_layers[i](output) + output).transpose(1, 2)\n ).transpose(1, 2)\n output = output.reshape(\n n_batch, n_instance, n_channel, n_frames\n ).permute(1, 0, 3, 2).flatten(1, 2) # (q, bt, c)\n\n # do objects self attention\n output = self.transformer_obj_self_attention_layers[i](\n output, tgt_mask=None,\n tgt_key_padding_mask=None,\n query_pos=None\n )\n\n # do cross attention\n output = self.transformer_cross_attention_layers[i](\n output, frame_embeds,\n memory_mask=None,\n memory_key_padding_mask=None,\n pos=None, query_pos=None\n )\n\n # FFN\n output = self.transformer_ffn_layers[i](\n output\n )\n\n output = output.reshape(n_instance, n_batch, n_frames, n_channel).permute(1, 3, 2, 0) # (b, c, t, q)\n outputs.append(output)\n\n outputs = torch.stack(outputs, dim=0).permute(3, 0, 4, 1, 2) # (l, b, c, t, q) -> (t, l, q, b, c)\n outputs_class, outputs_masks = self.prediction(outputs, mask_features, text_classifier, num_templates)\n outputs = self.decoder_norm(outputs)\n out = {\n 'pred_logits': outputs_class[-1].transpose(1, 2), # (b, t, q, c)\n 'pred_masks': outputs_masks[-1], # (b, q, t, h, w)\n 'aux_outputs': self._set_aux_loss(\n outputs_class, outputs_masks\n ),\n 'pred_embds': outputs[:, -1].permute(2, 3, 0, 1) # (b, c, t, q)\n }\n return out\n\n @torch.jit.unused\n def _set_aux_loss(self, outputs_class, outputs_seg_masks):\n # this is a 
workaround to make torchscript happy, as torchscript\n # doesn't support dictionary with non-homogeneous values, such\n # as a dict having both a Tensor and a list.\n return [{\"pred_logits\": a.transpose(1, 2), \"pred_masks\": b}\n for a, b in zip(outputs_class[:-1], outputs_seg_masks[:-1])\n ]\n\n def windows_prediction(self, outputs, mask_features, text_classifier, num_templates, windows=5):\n \"\"\"\n for windows prediction, because mask features consumed too much GPU memory\n \"\"\"\n iters = outputs.size(0) // windows\n if outputs.size(0) % windows != 0:\n iters += 1\n outputs_classes = []\n outputs_masks = []\n maskpool_embeddings = []\n pixel_nums = []\n for i in range(iters):\n start_idx = i * windows\n end_idx = (i + 1) * windows\n clip_outputs = outputs[start_idx:end_idx]\n decoder_output = self.decoder_norm(clip_outputs)\n decoder_output = decoder_output.permute(1, 3, 0, 2, 4) # (l, b, t, q, c)\n mask_embed = self.mask_embed(decoder_output)\n mask_features_clip = mask_features[:, start_idx:end_idx].to(mask_embed.device)\n outputs_mask = torch.einsum(\n \"lbtqc,btchw->lbqthw\",\n mask_embed,\n mask_features_clip\n )\n maskpool_embedding, pixel_num = self._get_maskpool_embeddings(mask_features_clip, outputs_mask, return_num=True)\n maskpool_embeddings.append(maskpool_embedding) # (l b 1 q c)\n pixel_nums.append(pixel_num) # (l b 1 q)\n outputs_classes.append(decoder_output)\n outputs_masks.append(outputs_mask.cpu().to(torch.float32))\n outputs_classes = torch.cat(outputs_classes, dim=2)\n T = outputs.size(0)\n outputs_classes = self._temoral_weighting(outputs_classes) # (l, b, 1, q, c)\n\n maskpool_embeddings = torch.cat(maskpool_embeddings, dim=2)\n pixel_nums = torch.cat(pixel_nums, dim=2)\n pixel_nums = pixel_nums / torch.sum(pixel_nums, dim=2, keepdim=True)\n maskpool_embeddings = maskpool_embeddings * pixel_nums.unsqueeze(-1)\n maskpool_embeddings = torch.sum(maskpool_embeddings, dim=2, keepdim=True)\n maskpool_embeddings = self._mask_pooling_proj(maskpool_embeddings) # (l b 1 q c)\n\n class_embed = self.class_embed(maskpool_embeddings + outputs_classes)\n outputs_classes = get_classification_logits(class_embed, text_classifier, self.logit_scale, num_templates)\n outputs_classes = outputs_classes.repeat(1, 1, T, 1, 1).transpose(2, 3) # (l, b, q, t, cls+1)\n return outputs_classes.cpu().to(torch.float32), torch.cat(outputs_masks, dim=3)\n\n def _get_maskpool_embeddings(self, mask_features, outputs_mask, return_num=False):\n b, t, c, _, _ = mask_features.shape\n l, b, q, t, _, _ = outputs_mask.shape\n\n mask_features = mask_features.unsqueeze(0).repeat(l,\n 1, 1, 1, 1, 1).permute(0, 1, 3, 2, 4, 5).flatten(0, 1).flatten(2, 3) # lb, c, th, w\n outputs_mask_ = outputs_mask.flatten(0, 1).flatten(2, 3) # (lb, q, th, w)\n if return_num:\n maskpool_embeddings, pixel_num = self.mask_pooling(x=mask_features, mask=outputs_mask_, return_num=True)\n # maskpool_embeddings [lb, q, c], pixel_num [lb, q]\n pixel_num = pixel_num.reshape(l, b, q)\n pixel_num = pixel_num.unsqueeze(2) # (l b 1 q)\n else:\n maskpool_embeddings = self.mask_pooling(x=mask_features, mask=outputs_mask_) # [lb, q, c]\n maskpool_embeddings = maskpool_embeddings.reshape(l, b, *maskpool_embeddings.shape[-2:]) # (l b q c)\n maskpool_embeddings = maskpool_embeddings.unsqueeze(2) # (l b 1 q c)\n if return_num:\n return maskpool_embeddings, pixel_num\n else:\n return maskpool_embeddings\n\n def _temoral_weighting(self, decoder_output):\n # compute the weighted average of the decoder_output\n activation = 
self.activation_proj(decoder_output).softmax(dim=2) # (l, b, t, q, 1)\n class_output = (decoder_output * activation).sum(dim=2, keepdim=True) # (l, b, 1, q, c)\n return class_output\n\n def pred_class(self, decoder_output, mask_features, outputs_mask, text_classifier, num_templates):\n \"\"\"\n fuse the objects queries of all frames and predict an overall score based on the fused objects queries\n :param decoder_output: instance queries, shape is (l, b, t, q, c)\n \"\"\"\n T = decoder_output.size(2)\n\n # compute the weighted average of the decoder_output\n class_output = self._temoral_weighting(decoder_output)\n\n # fc-clip class head forward\n # mean pooling\n maskpool_embeddings = self._get_maskpool_embeddings(mask_features, outputs_mask)\n maskpool_embeddings = self._mask_pooling_proj(maskpool_embeddings) # (l b 1 q c)\n\n class_embed = self.class_embed(maskpool_embeddings + class_output)\n outputs_class = get_classification_logits(class_embed, text_classifier, self.logit_scale, num_templates)\n outputs_class = outputs_class.repeat(1, 1, T, 1, 1).transpose(2, 3) # (l, b, q, t, cls+1)\n return outputs_class\n\n def prediction(self, outputs, mask_features, text_classifier, num_templates):\n \"\"\"\n :param outputs: instance queries, shape is (t, l, q, b, c)\n :param mask_features: mask features, shape is (b, t, c, h, w)\n :return: pred class and pred masks\n \"\"\"\n if self.training:\n decoder_output = self.decoder_norm(outputs)\n decoder_output = decoder_output.permute(1, 3, 0, 2, 4) # (l, b, t, q, c)\n mask_embed = self.mask_embed(decoder_output)\n outputs_mask = torch.einsum(\"lbtqc,btchw->lbqthw\", mask_embed, mask_features)\n outputs_class = self.pred_class(decoder_output, mask_features,\n outputs_mask, text_classifier, num_templates)\n else:\n outputs = outputs[:, -1:]\n outputs_class, outputs_mask = self.windows_prediction(outputs, mask_features, text_classifier,\n num_templates, windows=self.windows)\n return outputs_class, outputs_mask" }, { "identifier": "MaskPooling", "path": "ov_dvis/video_mask2former_transformer_decoder_ov.py", "snippet": "class MaskPooling(nn.Module):\n def __init__(\n self,\n ):\n super().__init__()\n\n def forward(self, x, mask, return_num=False):\n \"\"\"\n Args:\n x: [B, C, H, W]\n mask: [B, Q, H, W]\n \"\"\"\n if not x.shape[-2:] == mask.shape[-2:]:\n # reshape mask to x\n mask = F.interpolate(mask, size=x.shape[-2:], mode='bilinear', align_corners=False)\n with torch.no_grad():\n mask = mask.detach()\n mask = (mask > 0).to(mask.dtype)\n denorm = mask.sum(dim=(-1, -2), keepdim=True) + 1e-8\n\n mask_pooled_x = torch.einsum(\n \"bchw,bqhw->bqc\",\n x,\n mask / denorm,\n )\n if return_num:\n return mask_pooled_x, denorm\n else:\n return mask_pooled_x" }, { "identifier": "loss_reid", "path": "dvis_Plus/utils.py", "snippet": "def loss_reid(qd_items, outputs):\n # outputs only using when have not contrastive items\n # compute two loss, contrastive loss & similarity loss\n contras_loss = 0\n aux_loss = 0\n num_qd_items = len(qd_items) # n_instances * frames\n\n # if none items, return 0 loss\n if len(qd_items) == 0:\n if 'pred_references' in outputs.keys():\n losses = {'loss_reid': outputs['pred_references'].sum() * 0,\n 'loss_aux_reid': outputs['pred_references'].sum() * 0}\n else:\n losses = {'loss_reid': outputs['pred_embds'].sum() * 0,\n 'loss_aux_reid': outputs['pred_embds'].sum() * 0}\n return losses\n\n for qd_item in qd_items:\n # (n_pos, n_anchor) -> (n_anchor, n_pos)\n pred = qd_item['dot_product'].permute(1, 0)\n label = 
qd_item['label'].unsqueeze(0)\n # contrastive loss\n pos_inds = (label == 1)\n neg_inds = (label == 0)\n pred_pos = pred * pos_inds.float()\n pred_neg = pred * neg_inds.float()\n # use -inf to mask out unwanted elements.\n pred_pos[neg_inds] = pred_pos[neg_inds] + float('inf')\n pred_neg[pos_inds] = pred_neg[pos_inds] + float('-inf')\n\n _pos_expand = torch.repeat_interleave(pred_pos, pred.shape[1], dim=1)\n _neg_expand = pred_neg.repeat(1, pred.shape[1])\n # [bz,N], N is all pos and negative samples on reference frame, label indicate it's pos or negative\n x = torch.nn.functional.pad(\n (_neg_expand - _pos_expand), (0, 1), \"constant\", 0)\n contras_loss += torch.logsumexp(x, dim=1)\n\n aux_pred = qd_item['cosine_similarity'].permute(1, 0)\n aux_label = qd_item['label'].unsqueeze(0)\n aux_loss += (torch.abs(aux_pred - aux_label) ** 2).mean()\n\n losses = {'loss_reid': contras_loss.sum() / num_qd_items,\n 'loss_aux_reid': aux_loss / num_qd_items}\n return losses" } ]
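The matcher snippets above build a class cost, a point-sampled mask cost, and a dice cost, then solve a 1-to-1 assignment with scipy's linear_sum_assignment. Below is a minimal, self-contained sketch of that cost construction, assuming batch_dice_loss and batch_sigmoid_ce_loss follow the standard Mask2Former definitions; the shapes, class counts, and cost weights are illustrative, not the repo's exact values.

# Sketch of the per-frame matching cost (illustrative shapes/weights).
import torch
import torch.nn.functional as F
from scipy.optimize import linear_sum_assignment

def batch_dice_cost(inputs, targets):
    # inputs: (N, P) mask logits at sampled points, targets: (M, P) binary gt masks
    inputs = inputs.sigmoid()
    numerator = 2 * torch.einsum("np,mp->nm", inputs, targets)
    denominator = inputs.sum(-1)[:, None] + targets.sum(-1)[None, :]
    return 1 - (numerator + 1) / (denominator + 1)          # (N, M)

def batch_sigmoid_ce_cost(inputs, targets):
    # pairwise mean BCE between every prediction and every ground-truth mask
    pos = F.binary_cross_entropy_with_logits(inputs, torch.ones_like(inputs), reduction="none")
    neg = F.binary_cross_entropy_with_logits(inputs, torch.zeros_like(inputs), reduction="none")
    return (torch.einsum("np,mp->nm", pos, targets)
            + torch.einsum("np,mp->nm", neg, 1 - targets)) / inputs.shape[1]

N, M, P, C = 100, 5, 128, 10                        # queries, gt objects, sampled points, classes
out_prob = torch.randn(N, C + 1).softmax(-1)        # class probabilities (incl. no-object)
tgt_ids = torch.randint(0, C, (M,))                 # gt class labels
out_mask = torch.randn(N, P)                        # point-sampled predicted mask logits
tgt_mask = (torch.rand(M, P) > 0.5).float()         # point-sampled gt masks

cost = (5.0 * batch_sigmoid_ce_cost(out_mask, tgt_mask)     # cost_mask
        + 2.0 * -out_prob[:, tgt_ids]                       # cost_class
        + 5.0 * batch_dice_cost(out_mask, tgt_mask))        # cost_dice
pred_idx, gt_idx = linear_sum_assignment(cost.numpy())      # 1-to-1 assignment

Queries that do not appear in pred_idx stay unmatched and are treated as no-object, which is the behaviour described in the matcher docstring above.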
import logging
import einops
import torch
from typing import Tuple
from torch import nn
from torch.nn import functional as F
from detectron2.config import configurable
from detectron2.data import MetadataCatalog
from detectron2.modeling import META_ARCH_REGISTRY, build_backbone, build_sem_seg_head
from detectron2.modeling.backbone import Backbone
from detectron2.structures import Boxes, ImageList, Instances, BitMasks
from mask2former_video.modeling.criterion import VideoSetCriterion_ov
from mask2former_video.modeling.matcher import VideoHungarianMatcher, VideoHungarianMatcher_Consistent
from mask2former_video.utils.memory import retry_if_cuda_oom
from scipy.optimize import linear_sum_assignment
from .video_dvis_modules_ov import ReferringTracker_noiser_OV, TemporalRefiner_OV
from .video_mask2former_transformer_decoder_ov import MaskPooling
from dvis_Plus.utils import loss_reid
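Among these imports, MaskPooling (defined in the context snippet earlier) performs masked average pooling of dense CLIP features over each query's predicted mask. A minimal sketch of what it computes, with illustrative shapes:

# Sketch of the masked average pooling done by MaskPooling (illustrative shapes).
import torch
import torch.nn.functional as F

def mask_pool(x, mask):
    # x: (B, C, H, W) dense features, mask: (B, Q, h, w) mask logits
    if x.shape[-2:] != mask.shape[-2:]:
        mask = F.interpolate(mask, size=x.shape[-2:], mode="bilinear", align_corners=False)
    mask = (mask > 0).to(x.dtype)                               # binarize
    denorm = mask.sum(dim=(-1, -2), keepdim=True) + 1e-8        # pixels per query
    return torch.einsum("bchw,bqhw->bqc", x, mask / denorm)     # (B, Q, C)

pooled = mask_pool(torch.randn(2, 256, 32, 32), torch.randn(2, 100, 128, 128))
print(pooled.shape)  # torch.Size([2, 100, 256])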
13,492
*, backbone: Backbone, sem_seg_head: nn.Module, criterion: nn.Module, num_queries: int, object_mask_threshold: float, overlap_threshold: float, train_metadatas: dict, test_metadatas: dict, size_divisibility: int, sem_seg_postprocess_before_inference: bool, pixel_mean: Tuple[float], pixel_std: Tuple[float], # video tracker, num_frames, window_inference, max_num, max_iter_num, window_size, task, # fc-clip geometric_ensemble_alpha: float, geometric_ensemble_beta: float, ensemble_on_valid_mask: bool, # multi datasets test2train={}, ): """ Args: backbone: a backbone module, must follow detectron2's backbone interface sem_seg_head: a module that predicts semantic segmentation from backbone features criterion: a module that defines the loss num_queries: int, number of queries object_mask_threshold: float, threshold to filter query based on classification score for panoptic segmentation inference overlap_threshold: overlap threshold used in general inference for panoptic segmentation metadata: dataset meta, get `thing` and `stuff` category names for panoptic segmentation inference size_divisibility: Some backbones require the input height and width to be divisible by a specific integer. We can use this to override such requirement. sem_seg_postprocess_before_inference: whether to resize the prediction back to original input size before semantic segmentation inference or after. For high-resolution dataset like Mapillary, resizing predictions before inference will cause OOM error. pixel_mean, pixel_std: list or tuple with #channels element, representing the per-channel mean and std to be used to normalize the input image # video tracker: a tracker module, e.g. ReferringTracker num_frames: number of frames sampled during training window_inference: if the GPU memory is insufficient to predict the entire video at once, inference needs to be performed clip by clip num_class: the categories number of the dataset max_num: the maximum number of instances retained for a video, only used in VIS max_iter_num: the iter nums window_size: the number of images processed by the segmenter at a time task: VIS, VSS or VPS """ super().__init__( backbone=backbone, sem_seg_head=sem_seg_head, criterion=criterion, num_queries=num_queries, object_mask_threshold=object_mask_threshold, overlap_threshold=overlap_threshold, train_metadatas=train_metadatas, test_metadatas=test_metadatas, size_divisibility=size_divisibility, sem_seg_postprocess_before_inference=sem_seg_postprocess_before_inference, pixel_mean=pixel_mean, pixel_std=pixel_std, # video num_frames=num_frames, window_inference=window_inference, # dc clip geometric_ensemble_alpha=geometric_ensemble_alpha, geometric_ensemble_beta=geometric_ensemble_beta, ensemble_on_valid_mask=ensemble_on_valid_mask, # multi datasets test2train=test2train, ) # frozen the void classifier for p in self.void_embedding.parameters(): p.requires_grad_(False) # frozen the segmenter for p in self.backbone.parameters(): p.requires_grad_(False) for p in self.sem_seg_head.parameters(): p.requires_grad_(False) self.tracker = tracker self.max_num = max_num self.iter = 0 self.max_iter_num = max_iter_num self.window_size = window_size self.task = task assert self.task in ['vis', 'vss', 'vps'], "Only support vis, vss and vps !" 
inference_dict = { 'vis': self.inference_video_vis, 'vss': self.inference_video_vss, 'vps': self.inference_video_vps, } self.inference_video_task = inference_dict[self.task] @classmethod def from_config(cls, cfg): backbone = build_backbone(cfg) sem_seg_head = build_sem_seg_head(cfg, backbone.output_shape()) # Loss parameters: deep_supervision = cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION no_object_weight = cfg.MODEL.MASK_FORMER.NO_OBJECT_WEIGHT # loss weights class_weight = cfg.MODEL.MASK_FORMER.CLASS_WEIGHT dice_weight = cfg.MODEL.MASK_FORMER.DICE_WEIGHT mask_weight = cfg.MODEL.MASK_FORMER.MASK_WEIGHT # building criterion
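The constructor in the cropped code above takes geometric_ensemble_alpha, geometric_ensemble_beta, and ensemble_on_valid_mask; in the full forward pass further below, these control how in-vocabulary logits are fused with CLIP-pooled out-of-vocabulary logits. A minimal sketch of that geometric ensemble, with illustrative alpha/beta values and the valid-mask weighting omitted:

# Sketch of the FC-CLIP geometric ensemble (illustrative alpha/beta and shapes).
import torch

def geometric_ensemble(in_vocab, out_vocab, seen_mask, alpha=0.4, beta=0.8):
    # in_vocab / out_vocab: (..., K) softmax probabilities from the segmenter head
    # and from CLIP mask pooling; seen_mask: (K,) 1 for categories seen in training.
    seen = (in_vocab ** (1 - alpha)) * (out_vocab ** alpha)
    unseen = (in_vocab ** (1 - beta)) * (out_vocab ** beta)
    return (seen * seen_mask + unseen * (1 - seen_mask)).log()

logits = geometric_ensemble(
    torch.rand(100, 20).softmax(-1),      # in-vocabulary class probabilities
    torch.rand(100, 20).softmax(-1),      # CLIP out-of-vocabulary probabilities
    (torch.rand(20) > 0.5).float(),       # which categories overlap the training vocabulary
)

Seen categories lean mostly on the in-vocabulary head (small alpha), while unseen categories lean on CLIP (larger beta), which is the design the forward pass below applies per frame and per query.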
logger = logging.getLogger(__name__) VILD_PROMPT = [ "a photo of a {}.", "This is a photo of a {}", "There is a {} in the scene", "There is the {} in the scene", "a photo of a {} in the scene", "a photo of a small {}.", "a photo of a medium {}.", "a photo of a large {}.", "This is a photo of a small {}.", "This is a photo of a medium {}.", "This is a photo of a large {}.", "There is a small {} in the scene.", "There is a medium {} in the scene.", "There is a large {} in the scene.", ] def get_classification_logits(x, text_classifier, logit_scale, num_templates=None): x = F.normalize(x, dim=-1) logit_scale = torch.clamp(logit_scale.exp(), max=100) pred_logits = logit_scale * x @ text_classifier.T # B, *, N + 1 # max ensembel as in OpenSeg/ODISE final_pred_logits = [] cur_idx = 0 for num_t in num_templates[:-1]: final_pred_logits.append(pred_logits[:, :, cur_idx: cur_idx + num_t].max(-1).values) cur_idx += num_t # final_pred_logits.append(pred_logits[:, :, -1]) # the last classifier is for void final_pred_logits.append(pred_logits[:, :, -num_templates[-1]:].max(-1).values) final_pred_logits = torch.stack(final_pred_logits, dim=-1) return final_pred_logits @META_ARCH_REGISTRY.register() class MinVIS_OV(nn.Module): @configurable def __init__( self, *, backbone: Backbone, sem_seg_head: nn.Module, criterion: nn.Module, num_queries: int, object_mask_threshold: float, overlap_threshold: float, train_metadatas: dict, test_metadatas: dict, size_divisibility: int, sem_seg_postprocess_before_inference: bool, pixel_mean: Tuple[float], pixel_std: Tuple[float], # video num_frames, window_inference, # fc-clip geometric_ensemble_alpha: float, geometric_ensemble_beta: float, ensemble_on_valid_mask: bool, # multi datasets test2train={}, task='vis', ): """ Args: backbone: a backbone module, must follow detectron2's backbone interface sem_seg_head: a module that predicts semantic segmentation from backbone features criterion: a module that defines the loss num_queries: int, number of queries object_mask_threshold: float, threshold to filter query based on classification score for panoptic segmentation inference overlap_threshold: overlap threshold used in general inference for panoptic segmentation metadata: dataset meta, get `thing` and `stuff` category names for panoptic segmentation inference size_divisibility: Some backbones require the input height and width to be divisible by a specific integer. We can use this to override such requirement. sem_seg_postprocess_before_inference: whether to resize the prediction back to original input size before semantic segmentation inference or after. For high-resolution dataset like Mapillary, resizing predictions before inference will cause OOM error. 
pixel_mean, pixel_std: list or tuple with #channels element, representing the per-channel mean and std to be used to normalize the input image semantic_on: bool, whether to output semantic segmentation prediction instance_on: bool, whether to output instance segmentation prediction panoptic_on: bool, whether to output panoptic segmentation prediction test_topk_per_image: int, instance segmentation parameter, keep topk instances per image test2train: dict, which void embedding to use """ super().__init__() self.backbone = backbone for p in self.backbone.parameters(): p.requires_grad_(False) self.sem_seg_head = sem_seg_head self.criterion = criterion self.num_queries = num_queries self.overlap_threshold = overlap_threshold self.object_mask_threshold = object_mask_threshold self.metadata = train_metadatas self.test_metadata = test_metadatas if size_divisibility < 0: # use backbone size_divisibility if not set size_divisibility = self.backbone.size_divisibility self.size_divisibility = size_divisibility self.sem_seg_postprocess_before_inference = sem_seg_postprocess_before_inference self.register_buffer("pixel_mean", torch.Tensor(pixel_mean).view(-1, 1, 1), False) self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1), False) self.num_frames = num_frames self.window_inference = window_inference # FC-CLIP args self.mask_pooling = MaskPooling() self.geometric_ensemble_alpha = geometric_ensemble_alpha self.geometric_ensemble_beta = geometric_ensemble_beta self.ensemble_on_valid_mask = ensemble_on_valid_mask self.train_text_classifier = None self.test_text_classifier = None self.train_num_templates = None self.test_num_templates = None self.category_overlapping_mask = None self.train_text_classifier_dict = {} self.test_text_classifier_dict = {} self.train_num_templates_dict = {} self.test_num_templates_dict = {} self.test_num_templates_dict = {} self.void_embedding = nn.Embedding(1, backbone.dim_latent) # use this for void # init private void embedding for each dataset if len(train_metadatas) - 1 > 0: self.additional_void_embedding = nn.Embedding(len(train_metadatas) - 1, backbone.dim_latent) else: self.additional_void_embedding = None self.train_class_prepares = {} self.train_names2id = {} self.test_class_prepares = {} for i, name in enumerate(train_metadatas.keys()): self.train_names2id[name] = i train_metadata = train_metadatas[name] _, train_num_templates, train_class_names = self.prepare_class_names_from_metadata(train_metadata, train_metadata) self.train_class_prepares.update({name: {'num_templates': train_num_templates, 'class_names': train_class_names}}) all_train_metadatas = [train_metadatas[key] for key in train_metadatas.keys()] self.all_train_metadatas = all_train_metadatas for name in test_metadatas.keys(): test_metadata = test_metadatas[name] category_overlapping_mask, test_num_templates, test_class_names = self.prepare_class_names_from_metadata( test_metadata, all_train_metadatas) self.test_class_prepares.update({name: {'overlapping': category_overlapping_mask, 'num_templates': test_num_templates, 'class_names': test_class_names}}) self.test2train = test2train self.test_use_all_vocabulary = False self.void_embedding_merge_mode = 'coco' # ['mean', 'max', 'coco'] self.task = task assert self.task in ['vis', 'vss', 'vps'], "Only support vis, vss and vps !" 
inference_dict = { 'vis': self.inference_video_vis, 'vss': self.inference_video_vss, 'vps': self.inference_video_vps, } self.inference_video = inference_dict[self.task] def get_text_classifier_with_void(self, text_classifier, num_templates, name): def split_labels(x): res = [] for x_ in x: x_ = x_.replace(', ', ',') x_ = x_.split(',') # there can be multiple synonyms for single class res.append(x_) return res if self.training or not self.test_use_all_vocabulary: if self.additional_void_embedding is None: _zero = self.void_embedding.weight.sum() * 0.0 else: _zero = self.void_embedding.weight.sum() * 0.0 + self.additional_void_embedding.weight.sum() * 0.0 if name in self.train_names2id.keys(): i = self.train_names2id[name] if i == 0: void_embed = self.void_embedding.weight else: void_embed = self.additional_void_embedding.weight[i - 1: i] void_embed = F.normalize(void_embed, dim=-1) + _zero else: if self.additional_void_embedding is None: void_embed = self.void_embedding.weight void_embed = F.normalize(void_embed, dim=-1) else: void_embed = torch.cat([self.void_embedding.weight, self.additional_void_embedding.weight], dim=0) void_embed = F.normalize(void_embed, dim=-1).detach() if self.void_embedding_merge_mode == 'mean': void_embed = torch.mean(void_embed, dim=0, keepdim=True) elif self.void_embedding_merge_mode == 'max': pass elif self.void_embedding_merge_mode == 'coco': void_embed = void_embed[:1] else: raise NotImplementedError text_classifier = torch.cat([text_classifier, void_embed], dim=0) num_templates = num_templates + [void_embed.shape[0]] return text_classifier, num_templates else: # print("using additional vocabulary !!!") class_names = split_labels(self.test_metadata[name].classes_ov) # it includes both thing and stuff if isinstance(self.all_train_metadatas, list): train_classes = [] for item in self.all_train_metadatas: train_classes += item.classes_ov if len(train_classes) != 0: train_class_names = split_labels(train_classes) else: raise NotImplementedError else: train_class_names = split_labels(self.all_train_metadatas.classes_ov) test_class_names = {l for label in class_names for l in label} # train_class_names = {l for label in train_class_names for l in label} train2test_category_overlapping_list = [] for train_class_name in train_class_names: not_overlapping = set(train_class_name).isdisjoint(set(test_class_names)) train2test_category_overlapping_list.extend([not_overlapping] * len(train_class_name)) train2test_category_overlapping_list = torch.tensor( train2test_category_overlapping_list, dtype=torch.bool) train_classifiers = [] for key in self.metadata.keys(): if key not in self.train_text_classifier_dict.keys(): self._set_class_information(key, train=True) train_classifiers.append(self.train_text_classifier_dict[key]) train_classifiers = torch.cat(train_classifiers, dim=0)[train2test_category_overlapping_list] if name in self.test2train.keys(): i = self.train_names2id[self.test2train[name]] if i == 0: void_embed = self.void_embedding.weight else: void_embed = self.additional_void_embedding.weight[i - 1: i] void_embed = F.normalize(void_embed, dim=-1) else: if self.additional_void_embedding is not None: void_embed = torch.cat([self.void_embedding.weight, self.additional_void_embedding.weight], dim=0) void_embed = F.normalize(void_embed, dim=-1).detach() if self.void_embedding_merge_mode == 'mean': void_embed = torch.mean(void_embed, dim=0, keepdim=True) elif self.void_embedding_merge_mode == 'max': pass elif self.void_embedding_merge_mode == 'coco': void_embed = 
void_embed[:1] else: raise NotImplementedError else: void_embed = self.void_embedding.weight void_embed = F.normalize(void_embed, dim=-1) text_classifier = torch.cat([text_classifier, void_embed, train_classifiers], dim=0) num_templates = num_templates + [len(void_embed) + len(train_classifiers)] return text_classifier, num_templates def _set_class_information(self, name, train=True): self.name = name if train: if name in self.train_text_classifier_dict.keys(): return self.train_text_classifier_dict[name], self.train_num_templates_dict[name] else: infos = self.train_class_prepares[name] self.train_num_templates = infos['num_templates'] self.train_class_names = infos['class_names'] self.train_text_classifier = None self.train_text_classifier, self.train_num_templates = self.get_text_classifier(train=train) self.train_text_classifier_dict[name] = self.train_text_classifier self.train_num_templates_dict[name] = self.train_num_templates return self.train_text_classifier, self.train_num_templates else: self.category_overlapping_mask = self.test_class_prepares[name]['overlapping'] if name in self.test_text_classifier_dict.keys(): return self.test_text_classifier_dict[name], self.test_num_templates_dict[name] infos = self.test_class_prepares[name] self.category_overlapping_mask = infos['overlapping'] self.test_num_templates = infos['num_templates'] self.test_class_names = infos['class_names'] self.test_text_classifier = None self.test_text_classifier, self.test_num_templates = self.get_text_classifier(train=train) self.test_text_classifier_dict[name] = self.test_text_classifier self.test_num_templates_dict[name] = self.test_num_templates return self.test_text_classifier, self.test_num_templates def set_metadata(self, name, metadata): print(metadata.classes_ov) self.category_overlapping_mask, self.test_num_templates, self.test_class_names = \ self.prepare_class_names_from_metadata(metadata, self.all_train_metadatas) self.test_class_prepares.update({name: {'overlapping': self.category_overlapping_mask, 'num_templates': self.test_num_templates, 'class_names': self.test_class_names}}) if name in self.test_text_classifier_dict.keys(): del self.test_text_classifier_dict[name] self.test_text_classifier = None return def get_text_classifier(self, train=False): if self.training or train: if self.train_text_classifier is None: text_classifier = [] # this is needed to avoid oom, which may happen when num of class is large bs = 128 for idx in range(0, len(self.train_class_names), bs): text_classifier.append(self.backbone.get_text_classifier(self.train_class_names[idx:idx+bs], self.device).detach()) text_classifier = torch.cat(text_classifier, dim=0) # get per text embedding for per class template # average across templates and normalization. 
text_classifier /= text_classifier.norm(dim=-1, keepdim=True) text_classifier = text_classifier.reshape(text_classifier.shape[0]//len(VILD_PROMPT), len(VILD_PROMPT), text_classifier.shape[-1]).mean(1) text_classifier /= text_classifier.norm(dim=-1, keepdim=True) self.train_text_classifier = text_classifier # self.train_text_classifier, per component templates # self.train_num_templates, per class have how many components return self.train_text_classifier, self.train_num_templates else: if self.test_text_classifier is None: text_classifier = [] # this is needed to avoid oom, which may happen when num of class is large bs = 128 for idx in range(0, len(self.test_class_names), bs): text_classifier.append(self.backbone.get_text_classifier(self.test_class_names[idx:idx+bs], self.device).detach()) text_classifier = torch.cat(text_classifier, dim=0) # average across templates and normalization. text_classifier /= text_classifier.norm(dim=-1, keepdim=True) text_classifier = text_classifier.reshape(text_classifier.shape[0]//len(VILD_PROMPT), len(VILD_PROMPT), text_classifier.shape[-1]).mean(1) text_classifier /= text_classifier.norm(dim=-1, keepdim=True) self.test_text_classifier = text_classifier return self.test_text_classifier, self.test_num_templates def prepare_class_names_from_metadata(self, metadata, train_metadata): def split_labels(x): res = [] for x_ in x: x_ = x_.replace(', ', ',') x_ = x_.split(',') # there can be multiple synonyms for single class res.append(x_) return res # get text classifier try: class_names = split_labels(metadata.classes_ov) # it includes both thing and stuff if isinstance(train_metadata, list): train_classes = [] for item in train_metadata: train_classes += item.classes_ov if len(train_classes) != 0: train_class_names = split_labels(train_classes) else: raise NotImplementedError else: train_class_names = split_labels(train_metadata.classes_ov) except: # this could be for insseg, where only thing_classes are available class_names = split_labels(metadata.thing_classes_ov) if isinstance(train_metadata, list): train_thing_classes = [] for item in train_metadata: train_thing_classes += item.thing_classes_ov train_class_names = split_labels(train_thing_classes) else: train_class_names = split_labels(train_metadata.thing_classes_ov) train_class_names = {l for label in train_class_names for l in label} category_overlapping_list = [] for test_class_names in class_names: is_overlapping = not set(train_class_names).isdisjoint(set(test_class_names)) category_overlapping_list.append(is_overlapping) category_overlapping_mask = torch.tensor( category_overlapping_list, dtype=torch.long) def fill_all_templates_ensemble(x_=''): res = [] for x in x_: for template in VILD_PROMPT: res.append(template.format(x)) return res, len(res) // len(VILD_PROMPT) num_templates = [] templated_class_names = [] for x in class_names: templated_classes, templated_classes_num = fill_all_templates_ensemble(x) templated_class_names += templated_classes num_templates.append(templated_classes_num) # how many templates for current classes class_names = templated_class_names # category_overlapping_mask (N_train, ) # num_templates, [num_per_class_name, ], num of cur class is splited to how many components # class_names, [per_class_template, ], per_class_template [N_comp * N_template] return category_overlapping_mask, num_templates, class_names @classmethod def from_config(cls, cfg): backbone = build_backbone(cfg) sem_seg_head = build_sem_seg_head(cfg, backbone.output_shape()) # Loss parameters: 
deep_supervision = cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION no_object_weight = cfg.MODEL.MASK_FORMER.NO_OBJECT_WEIGHT # loss weights class_weight = cfg.MODEL.MASK_FORMER.CLASS_WEIGHT dice_weight = cfg.MODEL.MASK_FORMER.DICE_WEIGHT mask_weight = cfg.MODEL.MASK_FORMER.MASK_WEIGHT # building criterion matcher = VideoHungarianMatcher( cost_class=class_weight, cost_mask=mask_weight, cost_dice=dice_weight, num_points=cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS, ) weight_dict = {"loss_ce": class_weight, "loss_mask": mask_weight, "loss_dice": dice_weight} if deep_supervision: dec_layers = cfg.MODEL.MASK_FORMER.DEC_LAYERS aux_weight_dict = {} for i in range(dec_layers - 1): aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()}) weight_dict.update(aux_weight_dict) losses = ["labels", "masks"] criterion = VideoSetCriterion_ov( sem_seg_head.num_classes, matcher=matcher, weight_dict=weight_dict, eos_coef=no_object_weight, losses=losses, num_points=cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS, oversample_ratio=cfg.MODEL.MASK_FORMER.OVERSAMPLE_RATIO, importance_sample_ratio=cfg.MODEL.MASK_FORMER.IMPORTANCE_SAMPLE_RATIO, ) train_metadatas = {} test_metadatas = {} for name in cfg.DATASETS.TRAIN: train_metadatas[name] = MetadataCatalog.get(name) for name in cfg.DATASETS.TEST: test_metadatas[name] = MetadataCatalog.get(name) return { "backbone": backbone, "sem_seg_head": sem_seg_head, "criterion": criterion, "num_queries": cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES, "object_mask_threshold": cfg.MODEL.MASK_FORMER.TEST.OBJECT_MASK_THRESHOLD, "overlap_threshold": cfg.MODEL.MASK_FORMER.TEST.OVERLAP_THRESHOLD, "train_metadatas": train_metadatas, "test_metadatas": test_metadatas, "size_divisibility": cfg.MODEL.MASK_FORMER.SIZE_DIVISIBILITY, "sem_seg_postprocess_before_inference": True, "pixel_mean": cfg.MODEL.PIXEL_MEAN, "pixel_std": cfg.MODEL.PIXEL_STD, # video "num_frames": cfg.INPUT.SAMPLING_FRAME_NUM, "window_inference": cfg.MODEL.MASK_FORMER.TEST.WINDOW_INFERENCE, "task": cfg.MODEL.MASK_FORMER.TEST.TASK, # fc clip "geometric_ensemble_alpha": cfg.MODEL.FC_CLIP.GEOMETRIC_ENSEMBLE_ALPHA, "geometric_ensemble_beta": cfg.MODEL.FC_CLIP.GEOMETRIC_ENSEMBLE_BETA, "ensemble_on_valid_mask": cfg.MODEL.FC_CLIP.ENSEMBLE_ON_VALID_MASK, # multi datasets "test2train": {x: y for x, y in zip(cfg.DATASETS.TEST, cfg.DATASETS.TEST2TRAIN)}, } @property def device(self): return self.pixel_mean.device def forward(self, batched_inputs): """ Args: batched_inputs: a list, batched outputs of :class:`DatasetMapper`. Each item in the list contains the inputs for one image. For now, each item in the list is a dict that contains: * "image": Tensor, image in (C, H, W) format. * "instances": per-region ground truth * Other information that's included in the original dicts, such as: "height", "width" (int): the output resolution of the model (may be different from input resolution), used in inference. Returns: list[dict]: each dict has the results for one image. The dict contains the following keys: * "sem_seg": A Tensor that represents the per-pixel segmentation prediced by the head. The prediction has shape KxHxW that represents the logits of each class for each pixel. * "panoptic_seg": A tuple that represent panoptic output panoptic_seg (Tensor): of shape (height, width) where the values are ids for each segment. segments_info (list[dict]): Describe each segment in `panoptic_seg`. Each dict contains keys "id", "category_id", "isthing". 
""" name = batched_inputs[0]['name'] for batched_input in batched_inputs: assert name == batched_input['name'] # print(batched_inputs) images = [] for video in batched_inputs: for frame in video["image"]: images.append(frame.to(self.device)) images = [(x - self.pixel_mean) / self.pixel_std for x in images] images = ImageList.from_tensors(images, self.size_divisibility) text_classifier, num_templates = self._set_class_information(batched_inputs[0]['name'], self.training) # Append void class weight text_classifier, num_templates = self.get_text_classifier_with_void(text_classifier, num_templates, name=batched_inputs[0]['name']) if not self.training and self.window_inference: outputs = self.run_window_inference(images.tensor, window_size=3, text_classifier=text_classifier, num_templates=num_templates) else: features = self.backbone(images.tensor) features['text_classifier'] = text_classifier features['num_templates'] = num_templates outputs = self.sem_seg_head(features) if self.training: # mask classification target targets = self.prepare_targets(batched_inputs, images) outputs, targets = self.frame_decoder_loss_reshape(outputs, targets) # bipartite matching-based loss losses = self.criterion(outputs, targets) for k in list(losses.keys()): if k in self.criterion.weight_dict: losses[k] *= self.criterion.weight_dict[k] else: # remove this loss if not specified in `weight_dict` losses.pop(k) return losses else: # when inference, bs must be 1 mask_cls_results = outputs["pred_logits"][0] # t q c mask_pred_results = outputs["pred_masks"][0].transpose(0, 1) # t q h w # We ensemble the pred logits of in-vocab and out-vocab if "clip_vis_dense" in outputs.keys(): clip_feature = outputs["clip_vis_dense"] else: clip_feature = features["clip_vis_dense"] mask_for_pooling = F.interpolate(mask_pred_results, size=clip_feature.shape[-2:], mode='bilinear', align_corners=False) if "convnext" in self.backbone.model_name.lower(): pooled_clip_feature = self.mask_pooling(clip_feature, mask_for_pooling) pooled_clip_feature = self.backbone.visual_prediction_forward(pooled_clip_feature) elif "rn" in self.backbone.model_name.lower(): try: pooled_clip_feature = self.backbone.visual_prediction_forward(clip_feature, mask_for_pooling) # (t, q, c) except: pooled_clip_feature = [] _windows_size = 16 iters = len(mask_for_pooling) // _windows_size if len(mask_for_pooling) % _windows_size != 0: iters += 1 for i in range(iters): start_idx = i * _windows_size end_idx = (i + 1) * _windows_size pooled_clip_feature.append(self.backbone.visual_prediction_forward( clip_feature[start_idx:end_idx].to(self.device), mask_for_pooling[start_idx:end_idx].to(self.device))) pooled_clip_feature = torch.cat(pooled_clip_feature, dim=0) else: raise NotImplementedError out_vocab_cls_results = get_classification_logits(pooled_clip_feature, text_classifier, self.backbone.clip_model.logit_scale, num_templates) in_vocab_cls_results = mask_cls_results[..., :-1] # remove void out_vocab_cls_results = out_vocab_cls_results[..., :-1] # remove void # Reference: https://github.com/NVlabs/ODISE/blob/main/odise/modeling/meta_arch/odise.py#L1506 out_vocab_cls_probs = out_vocab_cls_results.softmax(-1) in_vocab_cls_results = in_vocab_cls_results.softmax(-1) category_overlapping_mask = self.category_overlapping_mask.to(self.device) if self.ensemble_on_valid_mask: # Only include out_vocab cls results on masks with valid pixels # We empirically find that this is important to obtain reasonable AP/mIOU score with ResNet CLIP models valid_masking = (mask_for_pooling > 
0).to(mask_for_pooling).sum(-1).sum(-1) > 0 valid_masking = valid_masking.to(in_vocab_cls_results.dtype).unsqueeze(-1) alpha = torch.ones_like(in_vocab_cls_results) * self.geometric_ensemble_alpha beta = torch.ones_like(in_vocab_cls_results) * self.geometric_ensemble_beta alpha = alpha * valid_masking beta = beta * valid_masking else: alpha = self.geometric_ensemble_alpha beta = self.geometric_ensemble_beta cls_logits_seen = ( (in_vocab_cls_results ** (1 - alpha) * out_vocab_cls_probs ** alpha).log() * category_overlapping_mask ) cls_logits_unseen = ( (in_vocab_cls_results ** (1 - beta) * out_vocab_cls_probs ** beta).log() * (1 - category_overlapping_mask) ) cls_results = cls_logits_seen + cls_logits_unseen # This is used to filtering void predictions. is_void_prob = F.softmax(mask_cls_results, dim=-1)[..., -1:] mask_cls_probs = torch.cat([ cls_results.softmax(-1) * (1.0 - is_void_prob), is_void_prob], dim=-1) mask_cls_results = torch.log(mask_cls_probs + 1e-8) outputs["pred_logits"][0] = mask_cls_results # t q c # for minvis outputs = self.post_processing(outputs) mask_cls_results = outputs["pred_logits"] mask_pred_results = outputs["pred_masks"] mask_cls_result = mask_cls_results[0] mask_pred_result = mask_pred_results[0] first_resize_size = (images.tensor.shape[-2], images.tensor.shape[-1]) input_per_image = batched_inputs[0] image_size = images.image_sizes[0] # image size without padding after data augmentation height = input_per_image.get("height", image_size[0]) # raw image size before data augmentation width = input_per_image.get("width", image_size[1]) return retry_if_cuda_oom(self.inference_video)( mask_cls_result, mask_pred_result, image_size, height, width, first_resize_size) def frame_decoder_loss_reshape(self, outputs, targets): outputs['pred_masks'] = einops.rearrange(outputs['pred_masks'], 'b q t h w -> (b t) q () h w') outputs['pred_logits'] = einops.rearrange(outputs['pred_logits'], 'b t q c -> (b t) q c') if 'aux_outputs' in outputs: for i in range(len(outputs['aux_outputs'])): outputs['aux_outputs'][i]['pred_masks'] = einops.rearrange( outputs['aux_outputs'][i]['pred_masks'], 'b q t h w -> (b t) q () h w' ) outputs['aux_outputs'][i]['pred_logits'] = einops.rearrange( outputs['aux_outputs'][i]['pred_logits'], 'b t q c -> (b t) q c' ) gt_instances = [] for targets_per_video in targets: num_labeled_frames = targets_per_video['ids'].shape[1] for f in range(num_labeled_frames): labels = targets_per_video['labels'] ids = targets_per_video['ids'][:, [f]] masks = targets_per_video['masks'][:, [f], :, :] gt_instances.append({"labels": labels, "ids": ids, "masks": masks}) return outputs, gt_instances def match_from_embds(self, tgt_embds, cur_embds): cur_embds = cur_embds / cur_embds.norm(dim=1)[:, None] tgt_embds = tgt_embds / tgt_embds.norm(dim=1)[:, None] cos_sim = torch.mm(cur_embds, tgt_embds.transpose(0, 1)) cost_embd = 1 - cos_sim C = 1.0 * cost_embd C = C.cpu() indices = linear_sum_assignment(C.transpose(0, 1)) # target x current indices = indices[1] # permutation that makes current aligns to target return indices def post_processing(self, outputs): pred_logits, pred_masks, pred_embds = outputs['pred_logits'], outputs['pred_masks'], outputs['pred_embds'] pred_logits = pred_logits[0] pred_masks = einops.rearrange(pred_masks[0], 'q t h w -> t q h w') pred_embds = einops.rearrange(pred_embds[0], 'c t q -> t q c') pred_logits = list(torch.unbind(pred_logits)) pred_masks = list(torch.unbind(pred_masks)) pred_embds = list(torch.unbind(pred_embds)) out_logits = [] out_masks = [] 
out_embds = [] out_logits.append(pred_logits[0]) out_masks.append(pred_masks[0]) out_embds.append(pred_embds[0]) # match the instances frame by frame for i in range(1, len(pred_logits)): indices = self.match_from_embds(out_embds[-1], pred_embds[i]) out_logits.append(pred_logits[i][indices, :]) out_masks.append(pred_masks[i][indices, :, :]) out_embds.append(pred_embds[i][indices, :]) out_logits = sum(out_logits)/len(out_logits) out_masks = torch.stack(out_masks, dim=1) # q h w -> q t h w out_logits = out_logits.unsqueeze(0) out_masks = out_masks.unsqueeze(0) outputs['pred_logits'] = out_logits outputs['pred_masks'] = out_masks return outputs def run_window_inference(self, images_tensor, window_size=30, text_classifier=None, num_templates=None): iters = len(images_tensor) // window_size if len(images_tensor) % window_size != 0: iters += 1 out_list = [] for i in range(iters): start_idx = i * window_size end_idx = (i+1) * window_size features = self.backbone(images_tensor[start_idx:end_idx]) features['text_classifier'] = text_classifier features['num_templates'] = num_templates out = self.sem_seg_head(features) del features['res2'], features['res3'], features['res4'], features['res5'] for j in range(len(out['aux_outputs'])): del out['aux_outputs'][j]['pred_masks'], out['aux_outputs'][j]['pred_logits'] # out['pred_masks'] = out['pred_masks'].detach().cpu().to(torch.float32) out['pred_masks'] = out['pred_masks'].detach() out['clip_vis_dense'] = features['clip_vis_dense'] out_list.append(out) # merge outputs outputs = {} outputs['pred_logits'] = torch.cat([x['pred_logits'] for x in out_list], dim=1).detach() outputs['pred_masks'] = torch.cat([x['pred_masks'] for x in out_list], dim=2).detach() outputs['pred_embds'] = torch.cat([x['pred_embds'] for x in out_list], dim=2).detach() outputs['clip_vis_dense'] = torch.cat([x['clip_vis_dense'] for x in out_list], dim=0).detach() return outputs def prepare_targets(self, targets, images): h_pad, w_pad = images.tensor.shape[-2:] gt_instances = [] for targets_per_video in targets: _num_instance = len(targets_per_video["instances"][0]) mask_shape = [_num_instance, self.num_frames, h_pad, w_pad] gt_masks_per_video = torch.zeros(mask_shape, dtype=torch.bool, device=self.device) gt_ids_per_video = [] gt_classes_per_video = [] for f_i, targets_per_frame in enumerate(targets_per_video["instances"]): targets_per_frame = targets_per_frame.to(self.device) h, w = targets_per_frame.image_size gt_ids_per_video.append(targets_per_frame.gt_ids[:, None]) gt_classes_per_video.append(targets_per_frame.gt_classes[:, None]) if isinstance(targets_per_frame.gt_masks, BitMasks): gt_masks_per_video[:, f_i, :h, :w] = targets_per_frame.gt_masks.tensor else: # polygon gt_masks_per_video[:, f_i, :h, :w] = targets_per_frame.gt_masks gt_ids_per_video = torch.cat(gt_ids_per_video, dim=1) gt_classes_per_video = torch.cat(gt_classes_per_video, dim=1).max(dim=1)[0] valid_idx = (gt_ids_per_video != -1).any(dim=-1) gt_classes_per_video = gt_classes_per_video[valid_idx] # N, gt_ids_per_video = gt_ids_per_video[valid_idx] # N, num_frames gt_instances.append({"labels": gt_classes_per_video, "ids": gt_ids_per_video}) gt_masks_per_video = gt_masks_per_video[valid_idx].float() # N, num_frames, H, W gt_instances[-1].update({"masks": gt_masks_per_video}) return gt_instances def inference_video_vis( self, pred_cls, pred_masks, img_size, output_height, output_width, first_resize_size, ): if len(pred_cls) > 0: scores = F.softmax(pred_cls, dim=-1)[:, :-1] labels = torch.arange( # 
self.sem_seg_head.num_classes, device=self.device pred_cls.shape[-1] - 1, device=self.device ).unsqueeze(0).repeat(self.num_queries, 1).flatten(0, 1) # keep top-K predictions scores_per_image, topk_indices = scores.flatten(0, 1).topk(10, sorted=False) labels_per_image = labels[topk_indices] topk_indices = topk_indices // (pred_cls.shape[-1] - 1) pred_masks = pred_masks[topk_indices] # interpolation to original image size pred_masks = F.interpolate( pred_masks, size=first_resize_size, mode="bilinear", align_corners=False ) pred_masks = pred_masks[:, :, : img_size[0], : img_size[1]] pred_masks = F.interpolate( pred_masks, size=(output_height, output_width), mode="bilinear", align_corners=False ) masks = pred_masks > 0. del pred_masks out_scores = scores_per_image.tolist() out_labels = labels_per_image.tolist() out_masks = [m for m in masks.cpu()] else: out_scores = [] out_labels = [] out_masks = [] video_output = { "image_size": (output_height, output_width), "pred_scores": out_scores, "pred_labels": out_labels, "pred_masks": out_masks, "task": "vis", } return video_output def inference_video_vps( self, pred_cls, pred_masks, img_size, output_height, output_width, first_resize_size, ): pred_cls = F.softmax(pred_cls, dim=-1) mask_pred = pred_masks scores, labels = pred_cls.max(-1) # filter out the background prediction keep = labels.ne(pred_cls.shape[-1] - 1) & (scores > self.object_mask_threshold) cur_scores = scores[keep] cur_classes = labels[keep] cur_masks = mask_pred[keep] # interpolation to original image size cur_masks = F.interpolate( cur_masks, size=first_resize_size, mode="bilinear", align_corners=False ) cur_masks = cur_masks[:, :, :img_size[0], :img_size[1]].sigmoid() cur_masks = F.interpolate( cur_masks, size=(output_height, output_width), mode="bilinear", align_corners=False ) cur_prob_masks = cur_scores.view(-1, 1, 1, 1).to(cur_masks.device) * cur_masks # initial panoptic_seg and segments infos h, w = cur_masks.shape[-2:] panoptic_seg = torch.zeros((cur_masks.size(1), h, w), dtype=torch.int32, device=cur_masks.device) segments_infos = [] current_segment_id = 0 if cur_masks.shape[0] == 0: # We didn't detect any mask return { "image_size": (output_height, output_width), "pred_masks": panoptic_seg.cpu(), "segments_infos": segments_infos, "task": "vps", } else: # take argmax cur_mask_ids = cur_prob_masks.argmax(0) # (t, h, w) stuff_memory_list = {} for k in range(cur_classes.shape[0]): pred_class = cur_classes[k].item() isthing = pred_class < len(self.test_metadata[self.name].thing_dataset_id_to_contiguous_id) # filter out the unstable segmentation results mask_area = (cur_mask_ids == k).sum().item() original_area = (cur_masks[k] >= 0.5).sum().item() mask = (cur_mask_ids == k) & (cur_masks[k] >= 0.5) if mask_area > 0 and original_area > 0 and mask.sum().item() > 0: if mask_area / original_area < self.overlap_threshold: continue # merge stuff regions if not isthing: if int(pred_class) in stuff_memory_list.keys(): panoptic_seg[mask] = stuff_memory_list[int(pred_class)] continue else: stuff_memory_list[int(pred_class)] = current_segment_id + 1 current_segment_id += 1 panoptic_seg[mask] = current_segment_id segments_infos.append( { "id": current_segment_id, "isthing": bool(isthing), "category_id": int(pred_class), } ) return { "image_size": (output_height, output_width), "pred_masks": panoptic_seg.cpu(), "segments_infos": segments_infos, "task": "vps", } def inference_video_vss( self, pred_cls, pred_masks, img_size, output_height, output_width, first_resize_size, ): mask_cls = 
F.softmax(pred_cls, dim=-1)[..., :-1] mask_pred = pred_masks # interpolation to original image size cur_masks = F.interpolate( mask_pred, size=first_resize_size, mode="bilinear", align_corners=False ) cur_masks = cur_masks[:, :, :img_size[0], :img_size[1]].sigmoid() cur_masks = F.interpolate( cur_masks, size=(output_height, output_width), mode="bilinear", align_corners=False ) semseg = torch.einsum("qc,qthw->cthw", mask_cls, cur_masks) sem_score, sem_mask = semseg.max(0) sem_mask = sem_mask return { "image_size": (output_height, output_width), "pred_masks": sem_mask.cpu(), "task": "vss", } @META_ARCH_REGISTRY.register() class DVIS_online_OV(MinVIS_OV): """ Online version of DVIS, including a segmenter and a referring tracker. """ @configurable def __init__( self, *, backbone: Backbone, sem_seg_head: nn.Module, criterion: nn.Module, num_queries: int, object_mask_threshold: float, overlap_threshold: float, train_metadatas: dict, test_metadatas: dict, size_divisibility: int, sem_seg_postprocess_before_inference: bool, pixel_mean: Tuple[float], pixel_std: Tuple[float], # video tracker, num_frames, window_inference, max_num, max_iter_num, window_size, task, # fc-clip geometric_ensemble_alpha: float, geometric_ensemble_beta: float, ensemble_on_valid_mask: bool, # multi datasets test2train={}, ): """ Args: backbone: a backbone module, must follow detectron2's backbone interface sem_seg_head: a module that predicts semantic segmentation from backbone features criterion: a module that defines the loss num_queries: int, number of queries object_mask_threshold: float, threshold to filter query based on classification score for panoptic segmentation inference overlap_threshold: overlap threshold used in general inference for panoptic segmentation metadata: dataset meta, get `thing` and `stuff` category names for panoptic segmentation inference size_divisibility: Some backbones require the input height and width to be divisible by a specific integer. We can use this to override such requirement. sem_seg_postprocess_before_inference: whether to resize the prediction back to original input size before semantic segmentation inference or after. For high-resolution dataset like Mapillary, resizing predictions before inference will cause OOM error. pixel_mean, pixel_std: list or tuple with #channels element, representing the per-channel mean and std to be used to normalize the input image # video tracker: a tracker module, e.g. 
ReferringTracker num_frames: number of frames sampled during training window_inference: if the GPU memory is insufficient to predict the entire video at once, inference needs to be performed clip by clip num_class: the categories number of the dataset max_num: the maximum number of instances retained for a video, only used in VIS max_iter_num: the iter nums window_size: the number of images processed by the segmenter at a time task: VIS, VSS or VPS """ super().__init__( backbone=backbone, sem_seg_head=sem_seg_head, criterion=criterion, num_queries=num_queries, object_mask_threshold=object_mask_threshold, overlap_threshold=overlap_threshold, train_metadatas=train_metadatas, test_metadatas=test_metadatas, size_divisibility=size_divisibility, sem_seg_postprocess_before_inference=sem_seg_postprocess_before_inference, pixel_mean=pixel_mean, pixel_std=pixel_std, # video num_frames=num_frames, window_inference=window_inference, # dc clip geometric_ensemble_alpha=geometric_ensemble_alpha, geometric_ensemble_beta=geometric_ensemble_beta, ensemble_on_valid_mask=ensemble_on_valid_mask, # multi datasets test2train=test2train, ) # frozen the void classifier for p in self.void_embedding.parameters(): p.requires_grad_(False) # frozen the segmenter for p in self.backbone.parameters(): p.requires_grad_(False) for p in self.sem_seg_head.parameters(): p.requires_grad_(False) self.tracker = tracker self.max_num = max_num self.iter = 0 self.max_iter_num = max_iter_num self.window_size = window_size self.task = task assert self.task in ['vis', 'vss', 'vps'], "Only support vis, vss and vps !" inference_dict = { 'vis': self.inference_video_vis, 'vss': self.inference_video_vss, 'vps': self.inference_video_vps, } self.inference_video_task = inference_dict[self.task] @classmethod def from_config(cls, cfg): backbone = build_backbone(cfg) sem_seg_head = build_sem_seg_head(cfg, backbone.output_shape()) # Loss parameters: deep_supervision = cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION no_object_weight = cfg.MODEL.MASK_FORMER.NO_OBJECT_WEIGHT # loss weights class_weight = cfg.MODEL.MASK_FORMER.CLASS_WEIGHT dice_weight = cfg.MODEL.MASK_FORMER.DICE_WEIGHT mask_weight = cfg.MODEL.MASK_FORMER.MASK_WEIGHT # building criterion
matcher = VideoHungarianMatcher_Consistent(
2
2023-11-14 10:55:11+00:00
16k
ej0cl6/TextEE
TextEE/models/Degree/EDtrainer.py
[ { "identifier": "BasicTrainer", "path": "TextEE/models/trainer.py", "snippet": "class BasicTrainer(object):\n def __init__(self, config, type_set=None):\n self.config = config\n self.type_set = type_set\n \n @classmethod\n def add_extra_info_fn(cls, instances, raw_data, config):\n for instance in instances:\n instance[\"extra_info\"] = None\n return instances\n \n def load_model(self, checkpoint=None):\n pass\n \n def train(self, train_data, dev_data, **kwargs):\n pass\n \n def predict(self, data, **kwargs):\n pass" }, { "identifier": "DegreeEDModel", "path": "TextEE/models/Degree/EDmodel.py", "snippet": "class DegreeEDModel(nn.Module):\n def __init__(self, config, tokenizer, type_set):\n super().__init__()\n self.config = config\n self.tokenizer = tokenizer\n self.type_set = type_set\n \n if self.config.pretrained_model_name.startswith('facebook/bart'):\n self.model_config = AutoConfig.from_pretrained(self.config.pretrained_model_name,\n cache_dir=self.config.cache_dir)\n self.model = AutoModelForPreTraining.from_pretrained(self.config.pretrained_model_name,\n cache_dir=self.config.cache_dir, config=self.model_config)\n else:\n raise ValueError(\"Not implemented.\")\n \n self.model.resize_token_embeddings(len(self.tokenizer))\n \n def process_data(self, batch):\n # encoder inputs\n inputs = self.tokenizer(batch.batch_input, return_tensors='pt', padding=True)\n enc_idxs = inputs['input_ids']\n enc_attn = inputs['attention_mask']\n\n # decoder inputs\n targets = self.tokenizer(batch.batch_target, return_tensors='pt', padding=True)\n batch_size = enc_idxs.size(0)\n \n if self.config.pretrained_model_name.startswith('facebook/bart'):\n padding = torch.ones((batch_size, 1), dtype=torch.long)\n padding[:] = self.tokenizer.eos_token_id\n # for BART, the decoder input should be:\n # PAD => BOS\n # BOS => A\n # A => B \n else:\n # t5 case\n padding = torch.ones((batch_size, 1), dtype=torch.long)\n padding[:] = self.tokenizer.pad_token_id\n # for t5, the decoder input should be:\n # PAD => A\n # A => B\n \n dec_idxs = torch.cat((padding, targets['input_ids']), dim=1)\n dec_attn = torch.cat((torch.ones((batch_size, 1), dtype=torch.long), targets['attention_mask']), dim=1)\n # dec_idxs = targets['input_ids']\n # dec_idxs[:, 0] = self.tokenizer.eos_token_id\n # dec_attn = targets['attention_mask']\n \n # labels\n padding = torch.ones((batch_size, 1), dtype=torch.long)\n padding[:] = self.tokenizer.pad_token_id\n raw_lbl_idxs = torch.cat((dec_idxs[:, 1:], padding), dim=1)\n lbl_attn = torch.cat((dec_attn[:, 1:], torch.zeros((batch_size, 1), dtype=torch.long)), dim=1)\n lbl_idxs = raw_lbl_idxs.masked_fill(lbl_attn==0, -100) # ignore padding\n \n enc_idxs = enc_idxs.cuda()\n enc_attn = enc_attn.cuda()\n dec_idxs = dec_idxs.cuda()\n dec_attn = dec_attn.cuda()\n raw_lbl_idxs = raw_lbl_idxs.cuda()\n lbl_idxs = lbl_idxs.cuda()\n \n return enc_idxs, enc_attn, dec_idxs, dec_attn, raw_lbl_idxs, lbl_idxs\n\n def forward(self, batch):\n enc_idxs, enc_attn, dec_idxs, dec_attn, raw_lbl_idxs, lbl_idxs = self.process_data(batch)\n outputs = self.model(input_ids=enc_idxs, \n attention_mask=enc_attn, \n decoder_input_ids=dec_idxs, \n decoder_attention_mask=dec_attn, \n labels=lbl_idxs, \n return_dict=True)\n \n loss = outputs['loss']\n \n return loss\n \n def predict(self, batch, num_beams=4, max_length=50):\n enc_idxs, enc_attn, dec_idxs, dec_attn, raw_lbl_idxs, lbl_idxs = self.process_data(batch)\n return self.generate(enc_idxs, enc_attn, num_beams, max_length)\n \n def generate(self, input_ids, attention_mask, 
num_beams=4, max_length=50, **kwargs):\n self.eval()\n with torch.no_grad():\n outputs = self.model.generate(input_ids=input_ids, \n attention_mask=attention_mask, \n num_beams=num_beams, \n max_length=max_length)\n final_output = []\n for bid in range(len(input_ids)):\n output_sentence = self.tokenizer.decode(outputs[bid], skip_special_tokens=True, clean_up_tokenization_spaces=True)\n final_output.append(output_sentence)\n self.train()\n return final_output\n \n def save_model(self, save_path):\n self.model.save_pretrained(save_path)\n\n def load_model(self, load_path):\n self.model.from_pretrained(load_path)" }, { "identifier": "event_template", "path": "TextEE/models/Degree/template_generate.py", "snippet": "class event_template():\n def __init__(self, event_type, info_dict, input_style, output_style, passage, ROLE_PH_MAP, gold_event=None):\n self.ROLE_PH_MAP = ROLE_PH_MAP\n self.info_dict = info_dict\n self.event_type = event_type\n self.input_style = input_style\n self.output_style = output_style\n self.output_template = self.get_output_template()\n self.passage = ' '.join(passage) # Assume this is English\n self.tokens = passage\n \n if gold_event is not None:\n self.gold_event = gold_event\n if isinstance(gold_event, list):\n # instance base\n self.trigger_text = f\" {AND} \".join([x['trigger text'] for x in gold_event if x['event type']==event_type])\n self.trigger_span = [x['trigger span'] for x in gold_event if x['event type']==event_type]\n self.arguments = [x['arguments'] for x in gold_event if x['event type']==event_type]\n else:\n # trigger base\n self.trigger_text = gold_event['trigger text']\n self.trigger_span = [gold_event['trigger span']]\n self.arguments = [gold_event['arguments']] \n else:\n self.gold_event = None\n \n def get_keywords(self):\n return self.info_dict['keywords']\n\n def get_output_template(self):\n output_template = ''\n for o_style in OUTPUT_STYLE_SET:\n if o_style in self.output_style:\n if o_style == 'trigger:sentence':\n output_template += ' {} {}'.format(SEP, format_template(self.info_dict['ED template'], self.ROLE_PH_MAP))\n if o_style == 'argument:sentence':\n output_template += ' {} {}'.format(SEP, format_template(self.info_dict['EAE template'], self.ROLE_PH_MAP))\n return (f'{SEP}'.join(output_template.split(f'{SEP}')[1:])).strip()\n\n def generate_pair(self, query_trigger):\n \"\"\"\n Generate model input sentence and output sentence pair\n \"\"\"\n input_str, supplements = self.generate_input_str_detail(query_trigger)\n output_str, gold_sample = self.generate_output_str(query_trigger)\n return (input_str, output_str, self.gold_event, gold_sample, self.event_type, self.tokens, supplements)\n\n def generate_input_str_detail(self, query_trigger):\n input_str = ''\n for i_style in INPUT_STYLE_SET:\n if i_style in self.input_style:\n if i_style == 'event_type':\n input_str += ' {} {}'.format(SEP, self.info_dict['event type'])\n if i_style == 'event_type_sent':\n input_str += ' {} {}'.format(SEP, self.info_dict['event description'])\n if i_style == 'keywords':\n input_str += ' {} Similar triggers such as {}'.format(SEP, ', '.join(self.get_keywords()))\n if i_style == 'triggers':\n input_str += ' {} The event trigger word is {}'.format(SEP, query_trigger)\n if i_style == 'template':\n input_str += ' {} {}'.format(SEP, self.output_template)\n return self.passage+input_str, input_str\n\n def generate_input_str(self, query_trigger):\n input_str = self.passage\n for i_style in INPUT_STYLE_SET:\n if i_style in self.input_style:\n if i_style == 
'event_type':\n input_str += ' {} {}'.format(SEP, self.info_dict['event type'])\n if i_style == 'event_type_sent':\n input_str += ' {} {}'.format(SEP, self.info_dict['event description'])\n if i_style == 'keywords':\n input_str += ' {} Similar triggers such as {}'.format(SEP, ', '.join(self.get_keywords()))\n if i_style == 'triggers':\n input_str += ' {} The event trigger word is {}'.format(SEP, query_trigger)\n if i_style == 'template':\n input_str += ' {} {}'.format(SEP, self.output_template)\n return input_str\n\n def generate_output_str(self, query_trigger):\n assert self.gold_event is not None\n output_str = ''\n gold_sample = False\n for o_style in OUTPUT_STYLE_SET:\n if o_style in self.output_style:\n if o_style == 'trigger:sentence':\n filler = dict()\n if self.trigger_text != '':\n filler[\"Trigger\"] = self.trigger_text\n gold_sample = True\n else:\n filler[\"Trigger\"] = TRIGGER_PH_MAP['Trigger']\n output_str += ' {} {}'.format(SEP, self.info_dict['ED template'].format(**filler))\n\n if o_style == 'argument:sentence':\n output_texts = []\n for argu in self.arguments:\n filler = dict()\n roles = re.findall(r\"{[^/}][^}]*}\", self.info_dict['EAE template'])\n roles = [role[1:-1].split(ROLE_TEMPLATE_PREFIX, 1)[1] for role in roles]\n for role_type in roles:\n filler['{}{}'.format(ROLE_TEMPLATE_PREFIX, role_type)] = f\" {AND} \".join([ a['argument text'] for a in argu[role_type]]) if role_type in argu.keys() else self.ROLE_PH_MAP['ROLE_{}'.format(role_type)]\n output_texts.append(self.info_dict['EAE template'].format(**filler))\n gold_sample = True\n output_str += ' {} {}'.format(SEP, ' <sep> '.join(output_texts))\n\n output_str = (f'{SEP}'.join(output_str.split(f'{SEP}')[1:])).strip()\n return (output_str, gold_sample)\n\n def decode(self, preds):\n output = []\n for cnt, pred in enumerate(preds.split(f'{SEP}')):\n used_o_cnt = 0\n full_pred = pred.strip()\n for o_style in OUTPUT_STYLE_SET:\n if o_style in self.output_style:\n if o_style == 'trigger:sentence':\n if used_o_cnt == cnt:\n # try:\n # contexts = re.split(r\"{[^/}][^}]*}\", self.info_dict['ED template'])\n # triggers = []\n # for idx in range(len(contexts)-1):\n # trigger = full_pred.split(contexts[idx], 1)[1]\n # trigger = trigger.split(contexts[idx+1], 1)[0]\n # triggers.append(trigger.strip())\n # triggers = [tri for trigger in triggers for tri in trigger.split(' and ') ]\n # for t_cnt, t in enumerate(triggers):\n # if t != TRIGGER_PH_MAP['Trigger'] and t != '':\n # output.append((t, self.event_type, {'tri counter': t_cnt})) # (text, type, kwargs)\n # except:\n # pass\n contexts = re.split(r\"{[^/}][^}]*}\", self.info_dict['ED template'])\n triggers = []\n for idx in range(len(contexts)-1):\n try:\n trigger = full_pred.split(contexts[idx], 1)[1]\n trigger = trigger.split(contexts[idx+1], 1)[0]\n triggers.append(trigger.strip())\n except:\n pass\n triggers = [tri for trigger in triggers for tri in trigger.split(f' {AND} ')]\n for t_cnt, t in enumerate(triggers):\n if t != TRIGGER_PH_MAP['Trigger'] and t != '':\n output.append((t, self.event_type, {'tri counter': t_cnt})) # (text, type, kwargs)\n used_o_cnt += 1\n if o_style == 'argument:sentence':\n if used_o_cnt == cnt:\n for a_cnt, prediction in enumerate(full_pred.split(' <sep> ')):\n contexts = re.split(r\"{[^/}][^}]*}\", self.info_dict['EAE template'])\n roles = re.findall(r\"{[^/}][^}]*}\", self.info_dict['EAE template'])\n roles = [role[1:-1].split(ROLE_TEMPLATE_PREFIX, 1)[1] for role in roles]\n assert len(contexts) == len(roles)+1\n\n for idx in 
range(len(contexts)-1):\n try:\n if contexts[idx] != '':\n pred_argu = prediction.split(contexts[idx], 1)[1]\n else:\n pred_argu = prediction\n if contexts[idx+1] != '':\n pred_argu = pred_argu.split(contexts[idx+1], 1)[0]\n pred_argu = pred_argu.split(f' {AND} ')\n for argu in pred_argu:\n if argu != self.ROLE_PH_MAP[\"{}{}\".format(ROLE_TEMPLATE_PREFIX, roles[idx])]:\n if argu != '':\n output.append((argu, roles[idx], {'cor tri cnt': a_cnt}))\n except:\n pass\n used_o_cnt += 1\n \n return output\n\n def evaluate(self, predict_output):\n assert self.gold_event is not None\n # categorize prediction\n pred_trigger = []\n pred_argument = []\n for pred in predict_output:\n if pred[1] == self.event_type:\n pred_trigger.append(pred)\n else:\n pred_argument.append(pred)\n \n # get trigger id map\n pred_trigger_map = {}\n for p_tri in pred_trigger:\n # assert p_tri[2]['tri counter'] not in pred_trigger_map.keys()\n pred_trigger_map[p_tri[2]['tri counter']] = p_tri\n\n # trigger score\n gold_tri_num = len(self.trigger_span)\n pred_tris = []\n for pred in pred_trigger:\n pred_span = self.predstr2span(pred[0])\n if pred_span[0] > -1:\n pred_tris.append((pred_span[0], pred_span[1], pred[1]))\n pred_tri_num = len(pred_tris)\n match_tri = 0\n for pred in pred_tris:\n id_flag = False\n for gold_span in self.trigger_span:\n if gold_span[0] == pred[0] and gold_span[1] == pred[1]:\n id_flag = True\n match_tri += int(id_flag)\n\n # argument score\n converted_gold = self.get_converted_gold()\n gold_arg_num = len(converted_gold)\n pred_arg = []\n for pred in pred_argument:\n # find corresponding trigger\n pred_span = None\n if isinstance(self.gold_event, list):\n # end2end case\n try:\n # we need this ``try'' because we cannot gurantee the model will be bug-free on the matching\n cor_tri = pred_trigger_map[pred[2]['cor tri cnt']]\n cor_tri_span_head = self.predstr2span(cor_tri[0])[0]\n if cor_tri_span_head > -1:\n pred_span = self.predstr2span(pred[0], cor_tri_span_head)\n else:\n continue\n except Exception as e:\n print('unmatch exception')\n print(e)\n else:\n # argument only case\n pred_span = self.predstr2span(pred[0], self.trigger_span[0][0])\n if (pred_span is not None) and (pred_span[0] > -1):\n pred_arg.append((pred_span[0], pred_span[1], pred[1]))\n pred_arg = list(set(pred_arg))\n pred_arg_num = len(pred_arg)\n \n target = converted_gold\n match_id = 0\n match_type = 0\n for pred in pred_arg:\n id_flag = False\n id_type = False\n for gold in target:\n if gold[0]==pred[0] and gold[1]==pred[1]:\n id_flag = True\n if gold[2] == pred[2]:\n id_type = True\n break\n match_id += int(id_flag)\n match_type += int(id_type)\n return {\n 'gold_tri_num': gold_tri_num, \n 'pred_tri_num': pred_tri_num,\n 'match_tri_num': match_tri,\n 'gold_arg_num': gold_arg_num,\n 'pred_arg_num': pred_arg_num,\n 'match_arg_id': match_id,\n 'match_arg_cls': match_type\n }\n \n def get_converted_gold(self):\n converted_gold = []\n for argu in self.arguments:\n for arg_type, arg_list in argu.items():\n for arg in arg_list:\n converted_gold.append((arg['argument span'][0], arg['argument span'][1], arg_type))\n return list(set(converted_gold))\n \n def predstr2span(self, pred_str, trigger_idx=None):\n sub_words = [_.strip() for _ in pred_str.strip().lower().split()]\n candidates=[]\n for i in range(len(self.tokens)):\n j = 0\n while j < len(sub_words) and i+j < len(self.tokens):\n if self.tokens[i+j].lower() == sub_words[j]:\n j += 1\n else:\n break\n if j == len(sub_words):\n candidates.append((i, i+len(sub_words)))\n if 
len(candidates) < 1:\n return -1, -1\n else:\n if trigger_idx is not None:\n return sorted(candidates, key=lambda x: abs(trigger_idx-x[0]))[0]\n else:\n return candidates[0]" }, { "identifier": "eve_template_generator", "path": "TextEE/models/Degree/template_generate.py", "snippet": "class eve_template_generator():\n def __init__(self, dataset, passage, triggers, roles, input_style, output_style, vocab, instance_base=False):\n \"\"\"\n generate strctured information for events\n \n args:\n dataset(str): which dataset is used\n passage(List): a list of tokens\n triggers(List): a list of triggers\n roles(List): a list of Roles\n input_style(List): List of elements; elements belongs to INPUT_STYLE_SET\n input_style(List): List of elements; elements belongs to OUTPUT_STYLE_SET\n instance_base(Bool): if instance_base, we generate only one pair (use for trigger generation), else, we generate trigger_base (use for argument generation)\n \"\"\"\n self.raw_passage = passage\n self.triggers = triggers\n self.roles = roles\n self.events = self.process_events(passage, triggers, roles)\n self.input_style = input_style\n self.output_style = output_style\n self.vocab = vocab\n self.event_templates = []\n if instance_base:\n for e_type in self.vocab['event_type_itos']:\n self.event_templates.append(\n event_template(e_type, patterns[dataset][e_type], \n self.input_style, self.output_style, passage, ROLE_PH_MAP[dataset], self.events)\n )\n else:\n for event in self.events:\n self.event_templates.append(\n event_template(event['event type'], patterns[dataset][event['event type']], \n self.input_style, self.output_style, event['tokens'], ROLE_PH_MAP[dataset], event)\n )\n self.data = [x.generate_pair(x.trigger_text) for x in self.event_templates]\n self.data = [x for x in self.data if x]\n\n def get_training_data(self):\n return self.data\n\n def process_events(self, passage, triggers, roles):\n \"\"\"\n Given a list of token and event annotation, return a list of structured event\n\n structured_event:\n {\n 'trigger text': str,\n 'trigger span': (start, end),\n 'event type': EVENT_TYPE(str),\n 'arguments':{\n ROLE_TYPE(str):[{\n 'argument text': str,\n 'argument span': (start, end)\n }],\n ROLE_TYPE(str):...,\n ROLE_TYPE(str):....\n }\n 'passage': PASSAGE\n }\n \"\"\"\n \n events = {trigger: [] for trigger in triggers}\n\n for argument in roles:\n trigger = argument[0]\n events[trigger].append(argument)\n \n event_structures = []\n for trigger, arguments in events.items():\n eve_type = trigger[2]\n eve_text = ' '.join(passage[trigger[0]:trigger[1]])\n eve_span = (trigger[0], trigger[1])\n argus = {}\n for argument in arguments:\n role_type = argument[1][2]\n if role_type not in argus.keys():\n argus[role_type] = []\n argus[role_type].append({\n 'argument text': ' '.join(passage[argument[1][0]:argument[1][1]]),\n 'argument span': (argument[1][0], argument[1][1]),\n })\n event_structures.append({\n 'trigger text': eve_text,\n 'trigger span': eve_span,\n 'event type': eve_type,\n 'arguments': argus,\n 'passage': ' '.join(passage),\n 'tokens': passage\n })\n return event_structures" }, { "identifier": "patterns", "path": "TextEE/models/Degree/pattern.py", "snippet": "ROLE_PH_MAP = {\n \"ace05-en\": {\n 'ROLE_Person': 'somebody',\n 'ROLE_Entity': 'some people or some organization',\n 'ROLE_Defendant': 'somebody',\n 'ROLE_Prosecutor': 'some other',\n 'ROLE_Plaintiff': 'some other',\n 'ROLE_Buyer': 'someone',\n 'ROLE_Artifact': 'something',\n 'ROLE_Seller': 'some seller',\n 'ROLE_Destination': 'somewhere',\n 
'ROLE_Origin': 'some place',\n 'ROLE_Vehicle': 'some vehicle',\n 'ROLE_Agent': 'somebody or some organization',\n 'ROLE_Attacker': 'some attacker',\n 'ROLE_Target': 'some facility, someone, or some organization',\n 'ROLE_Victim': 'some victim',\n 'ROLE_Instrument': 'some way',\n 'ROLE_Giver': 'someone',\n 'ROLE_Recipient': 'some other',\n 'ROLE_Org': 'some organization',\n 'ROLE_Place': 'somewhere',\n 'ROLE_Adjudicator': 'some adjudicator',\n },\n \"richere-en\": {\n 'ROLE_Person': 'somebody',\n 'ROLE_Entity': 'some people or some organization',\n 'ROLE_Defendant': 'somebody',\n 'ROLE_Prosecutor': 'some other',\n 'ROLE_Plaintiff': 'some other',\n 'ROLE_Buyer': 'someone',\n 'ROLE_Artifact': 'something',\n 'ROLE_Seller': 'some seller',\n 'ROLE_Destination': 'somewhere',\n 'ROLE_Origin': 'some place',\n 'ROLE_Vehicle': 'some vehicle',\n 'ROLE_Agent': 'somebody or some organization',\n 'ROLE_Attacker': 'some attacker',\n 'ROLE_Target': 'some facility, someone, or some organization',\n 'ROLE_Victim': 'some victim',\n 'ROLE_Instrument': 'some way',\n 'ROLE_Giver': 'someone',\n 'ROLE_Recipient': 'some other',\n 'ROLE_Org': 'some organization',\n 'ROLE_Place': 'somewhere',\n 'ROLE_Adjudicator': 'some adjudicator',\n 'ROLE_Thing': 'something',\n 'ROLE_Audience': 'some publicity',\n },\n \"m2e2\": {\n 'ROLE_Person': 'somebody',\n 'ROLE_Entity': 'some people or some organization',\n 'ROLE_Artifact': 'something',\n 'ROLE_Destination': 'somewhere',\n 'ROLE_Origin': 'some place',\n 'ROLE_Vehicle': 'some vehicle',\n 'ROLE_Agent': 'somebody or some organization',\n 'ROLE_Attacker': 'some attacker',\n 'ROLE_Target': 'some facility, someone, or some organization',\n 'ROLE_Victim': 'some victim',\n 'ROLE_Instrument': 'some way',\n 'ROLE_Giver': 'someone',\n 'ROLE_Recipient': 'some other',\n 'ROLE_Place': 'somewhere',\n 'ROLE_Police': 'some police',\n },\n \"geneva\": {\n \"ROLE_Act\": \"some act\",\n \"ROLE_Action\": \"some action\",\n \"ROLE_Activity\": \"some activity\",\n \"ROLE_Actor\": \"some actor\",\n \"ROLE_Addressee\": \"some addressee\",\n \"ROLE_Affected\": \"some affected\",\n \"ROLE_Affliction\": \"some affliction\",\n \"ROLE_Agent\": \"some agent\",\n \"ROLE_Agreement\": \"some agreement\",\n \"ROLE_Area\": \"some area\",\n \"ROLE_Arguer\": \"some arguer\",\n \"ROLE_Arguer2\": \"some arguer2\",\n \"ROLE_Arguers\": \"some arguers\",\n \"ROLE_Assailant\": \"some assailant\",\n \"ROLE_Asset\": \"some asset\",\n \"ROLE_Attendees\": \"some attendees\",\n \"ROLE_Attribute\": \"some attribute\",\n \"ROLE_Author\": \"some author\",\n \"ROLE_Authorities\": \"some authorities\",\n \"ROLE_Avenger\": \"some avenger\",\n \"ROLE_Barrier\": \"some barrier\",\n \"ROLE_Behavior\": \"some behavior\",\n \"ROLE_Beneficiary\": \"some beneficiary\",\n \"ROLE_Benefited_party\": \"some benefited party\",\n \"ROLE_Body\": \"some body\",\n \"ROLE_Body_location\": \"some body location\",\n \"ROLE_Body_part\": \"some body part\",\n \"ROLE_Buyer\": \"some buyer\",\n \"ROLE_Carrier\": \"some carrier\",\n \"ROLE_Cause\": \"some cause\",\n \"ROLE_Charges\": \"some charges\",\n \"ROLE_Chosen\": \"some chosen\",\n \"ROLE_Circumstances\": \"some circumstances\",\n \"ROLE_Clothing\": \"some clothing\",\n \"ROLE_Cognizer\": \"some cognizer\",\n \"ROLE_Communicator\": \"some communicator\",\n \"ROLE_Competition\": \"some competition\",\n \"ROLE_Components\": \"some components\",\n \"ROLE_Configuration\": \"some configuration\",\n \"ROLE_Conqueror\": \"some conqueror\",\n \"ROLE_Container\": \"some container\",\n \"ROLE_Content\": 
\"some content\",\n \"ROLE_Contents\": \"some contents\",\n \"ROLE_Controlling_variable\": \"some controlling variable\",\n \"ROLE_Course\": \"some course\",\n \"ROLE_Created_entity\": \"some created entity\",\n \"ROLE_Creator\": \"some creator\",\n \"ROLE_Crime\": \"some crime\",\n \"ROLE_Culture\": \"some culture\",\n \"ROLE_Deceased\": \"some deceased\",\n \"ROLE_Decision\": \"some decision\",\n \"ROLE_Defender\": \"some defender\",\n \"ROLE_Dependent_variable\": \"some dependent variable\",\n \"ROLE_Destroyer\": \"some destroyer\",\n \"ROLE_Difference\": \"some difference\",\n \"ROLE_Dimension\": \"some dimension\",\n \"ROLE_Direction\": \"some direction\",\n \"ROLE_Distance\": \"some distance\",\n \"ROLE_Domain\": \"some domain\",\n \"ROLE_Donor\": \"some donor\",\n \"ROLE_Duration\": \"some duration\",\n \"ROLE_Earner\": \"some earner\",\n \"ROLE_Earnings\": \"some earnings\",\n \"ROLE_Effect\": \"some effect\",\n \"ROLE_Employee\": \"some employee\",\n \"ROLE_Employer\": \"some employer\",\n \"ROLE_Entity\": \"some entity\",\n \"ROLE_Evaluee\": \"some evaluee\",\n \"ROLE_Event\": \"some event\",\n \"ROLE_Evidence\": \"some evidence\",\n \"ROLE_Exchanger_1\": \"some exchanger 1\",\n \"ROLE_Exchanger_2\": \"some exchanger 2\",\n \"ROLE_Exchangers\": \"some exchangers\",\n \"ROLE_Experiencer\": \"some experiencer\",\n \"ROLE_Fact\": \"some fact\",\n \"ROLE_Factory\": \"some factory\",\n \"ROLE_Field\": \"some field\",\n \"ROLE_Figures\": \"some figures\",\n \"ROLE_Final_category\": \"some final category\",\n \"ROLE_Final_quality\": \"some final quality\",\n \"ROLE_Final_subevent\": \"some final subevent\",\n \"ROLE_Final_value\": \"some final value\",\n \"ROLE_Focal_entity\": \"some focal entity\",\n \"ROLE_Goal\": \"some goal\",\n \"ROLE_Goal_area\": \"some goal area\",\n \"ROLE_Goods\": \"some goods\",\n \"ROLE_Ground\": \"some ground\",\n \"ROLE_Group\": \"some group\",\n \"ROLE_Helper\": \"some helper\",\n \"ROLE_Hindrance\": \"some hindrance\",\n \"ROLE_Host\": \"some host\",\n \"ROLE_Imposed_purpose\": \"some imposed purpose\",\n \"ROLE_Incident\": \"some incident\",\n \"ROLE_Individuals\": \"some individuals\",\n \"ROLE_Information\": \"some information\",\n \"ROLE_Ingestibles\": \"some ingestibles\",\n \"ROLE_Ingestor\": \"some ingestor\",\n \"ROLE_Inherent_purpose\": \"some inherent purpose\",\n \"ROLE_Initial_category\": \"some initial category\",\n \"ROLE_Initial_size\": \"some initial size\",\n \"ROLE_Initial_subevent\": \"some initial subevent\",\n \"ROLE_Injured_party\": \"some injured party\",\n \"ROLE_Injury\": \"some injury\",\n \"ROLE_Inspector\": \"some inspector\",\n \"ROLE_Instrument\": \"some instrument\",\n \"ROLE_Intended_event\": \"some intended event\",\n \"ROLE_Interlocutors\": \"some interlocutors\",\n \"ROLE_Investigator\": \"some investigator\",\n \"ROLE_Issue\": \"some issue\",\n \"ROLE_Item\": \"some item\",\n \"ROLE_Killer\": \"some killer\",\n \"ROLE_Label\": \"some label\",\n \"ROLE_Location\": \"some location\",\n \"ROLE_Manipulator\": \"some manipulator\",\n \"ROLE_Manner\": \"some manner\",\n \"ROLE_Means\": \"some means\",\n \"ROLE_Medication\": \"some medication\",\n \"ROLE_Medium\": \"some medium\",\n \"ROLE_Member\": \"some member\",\n \"ROLE_Message\": \"some message\",\n \"ROLE_Money\": \"some money\",\n \"ROLE_New_leader\": \"some new leader\",\n \"ROLE_New_member\": \"some new member\",\n \"ROLE_Object\": \"some object\",\n \"ROLE_Occasion\": \"some occasion\",\n \"ROLE_Offender\": \"some offender\",\n \"ROLE_Offense\": \"some offense\",\n 
\"ROLE_Offerer\": \"some offerer\",\n \"ROLE_Old_leader\": \"some old leader\",\n \"ROLE_Old_order\": \"some old order\",\n \"ROLE_Part_1\": \"some part 1\",\n \"ROLE_Part_2\": \"some part 2\",\n \"ROLE_Participants\": \"some participants\",\n \"ROLE_Partners\": \"some partners\",\n \"ROLE_Parts\": \"some parts\",\n \"ROLE_Path\": \"some path\",\n \"ROLE_Patient\": \"some patient\",\n \"ROLE_Payer\": \"some payer\",\n \"ROLE_Perceiver_agentive\": \"some perceiver agentive\",\n \"ROLE_Perpetrator\": \"some perpetrator\",\n \"ROLE_Phenomenon\": \"some phenomenon\",\n \"ROLE_Place\": \"some place\",\n \"ROLE_Place_of_employment\": \"some place of employment\",\n \"ROLE_Position\": \"some position\",\n \"ROLE_Possibilities\": \"some possibilities\",\n \"ROLE_Potential_hindrance\": \"some potential hindrance\",\n \"ROLE_Problem\": \"some problem\",\n \"ROLE_Process\": \"some process\",\n \"ROLE_Producer\": \"some producer\",\n \"ROLE_Product\": \"some product\",\n \"ROLE_Project\": \"some project\",\n \"ROLE_Proposal\": \"some proposal\",\n \"ROLE_Proposed_action\": \"some proposed action\",\n \"ROLE_Protagonist\": \"some protagonist\",\n \"ROLE_Punishment\": \"some punishment\",\n \"ROLE_Purpose\": \"some purpose\",\n \"ROLE_Rate\": \"some rate\",\n \"ROLE_Ratifier\": \"some ratifier\",\n \"ROLE_Reason\": \"some reason\",\n \"ROLE_Recipient\": \"some recipient\",\n \"ROLE_Researcher\": \"some researcher\",\n \"ROLE_Resource\": \"some resource\",\n \"ROLE_Responding_entity\": \"some responding entity\",\n \"ROLE_Response\": \"some response\",\n \"ROLE_Result\": \"some result\",\n \"ROLE_Result_size\": \"some result size\",\n \"ROLE_Role\": \"some role\",\n \"ROLE_Selector\": \"some selector\",\n \"ROLE_Self_mover\": \"some self mover\",\n \"ROLE_Seller\": \"some seller\",\n \"ROLE_Sender\": \"some sender\",\n \"ROLE_Side_1\": \"some side 1\",\n \"ROLE_Side_2\": \"some side 2\",\n \"ROLE_Sides\": \"some sides\",\n \"ROLE_Signatory\": \"some signatory\",\n \"ROLE_Situation\": \"some situation\",\n \"ROLE_Skill\": \"some skill\",\n \"ROLE_Social_event\": \"some social event\",\n \"ROLE_Source\": \"some source\",\n \"ROLE_Speaker\": \"some speaker\",\n \"ROLE_Specified_entity\": \"some specified entity\",\n \"ROLE_Speed\": \"some speed\",\n \"ROLE_State\": \"some state\",\n \"ROLE_Student\": \"some student\",\n \"ROLE_Subject\": \"some subject\",\n \"ROLE_Supplier\": \"some supplier\",\n \"ROLE_Supported\": \"some supported\",\n \"ROLE_Supporter\": \"some supporter\",\n \"ROLE_Suspect\": \"some suspect\",\n \"ROLE_Task\": \"some task\",\n \"ROLE_Teacher\": \"some teacher\",\n \"ROLE_Terrorist\": \"some terrorist\",\n \"ROLE_Tested_property\": \"some tested property\",\n \"ROLE_Tester\": \"some tester\",\n \"ROLE_Text\": \"some text\",\n \"ROLE_Theme\": \"some theme\",\n \"ROLE_Theme_1\": \"some theme 1\",\n \"ROLE_Theme_2\": \"some theme 2\",\n \"ROLE_Themes\": \"some themes\",\n \"ROLE_Time\": \"some time\",\n \"ROLE_Topic\": \"some topic\",\n \"ROLE_Transferors\": \"some transferors\",\n \"ROLE_Traveler\": \"some traveler\",\n \"ROLE_Traveller\": \"some traveller\",\n \"ROLE_Treatment\": \"some treatment\",\n \"ROLE_Trigger\": \"some trigger\",\n \"ROLE_Type\": \"some type\",\n \"ROLE_Unconfirmed_content\": \"some unconfirmed content\",\n \"ROLE_Undertaking\": \"some undertaking\",\n \"ROLE_Undesirable_event\": \"some undesirable event\",\n \"ROLE_Unwanted_entity\": \"some unwanted entity\",\n \"ROLE_Useful_location\": \"some useful location\",\n \"ROLE_Value_1\": \"some value 1\",\n 
\"ROLE_Value_2\": \"some value 2\",\n \"ROLE_Vehicle\": \"some vehicle\",\n \"ROLE_Venue\": \"some venue\",\n \"ROLE_Victim\": \"some victim\",\n \"ROLE_Weapon\": \"some weapon\",\n \"ROLE_Wearer\": \"some wearer\",\n \"ROLE_Whole\": \"some whole\",\n },\n \"maven\": {\n },\n \"mee-en\": {\n },\n \"fewevent\": {\n },\n \"rams\": {\n \"ROLE_artifact\": \"some artifact\",\n \"ROLE_artifactmoney\": \"some artifact money\",\n \"ROLE_attacker\": \"some attacker\",\n \"ROLE_ballot\": \"some ballot\",\n \"ROLE_beneficiary\": \"some beneficiary\",\n \"ROLE_candidate\": \"some candidate\",\n \"ROLE_communicator\": \"some communicator\",\n \"ROLE_crashobject\": \"some crash object\",\n \"ROLE_crime\": \"some crime\",\n \"ROLE_damager\": \"some damager\",\n \"ROLE_damagerdestroyer\": \"some damager destroyer\",\n \"ROLE_deceased\": \"some deceased\",\n \"ROLE_defendant\": \"some defendant\",\n \"ROLE_demonstrator\": \"some demonstrator\",\n \"ROLE_destination\": \"some destination\",\n \"ROLE_destroyer\": \"some destroyer\",\n \"ROLE_detainee\": \"some detainee\",\n \"ROLE_driverpassenger\": \"some driver passenger\",\n \"ROLE_employee\": \"some employee\",\n \"ROLE_executioner\": \"some executioner\",\n \"ROLE_extraditer\": \"some extraditer\",\n \"ROLE_fireexplosionobject\": \"some fire explosion object\",\n \"ROLE_founder\": \"some founder\",\n \"ROLE_giver\": \"some giver\",\n \"ROLE_governmentbody\": \"some government body\",\n \"ROLE_gpe\": \"some gpe\",\n \"ROLE_granter\": \"some granter\",\n \"ROLE_hidingplace\": \"some hiding place\",\n \"ROLE_injurer\": \"some injurer\",\n \"ROLE_inspectedentity\": \"some inspected entity\",\n \"ROLE_inspector\": \"some inspector\",\n \"ROLE_instrument\": \"some instrument\",\n \"ROLE_investigator\": \"some investigator\",\n \"ROLE_jailer\": \"some jailer\",\n \"ROLE_judgecourt\": \"some judge court\",\n \"ROLE_killer\": \"some killer\",\n \"ROLE_law\": \"some law\",\n \"ROLE_manufacturer\": \"some manufacturer\",\n \"ROLE_money\": \"some money\",\n \"ROLE_monitor\": \"some monitor\",\n \"ROLE_monitoredentity\": \"some monitored entity\",\n \"ROLE_observedentity\": \"some observed entity\",\n \"ROLE_observer\": \"some observer\",\n \"ROLE_origin\": \"some origin\",\n \"ROLE_otherparticipant\": \"some other participant\",\n \"ROLE_participant\": \"some participant\",\n \"ROLE_passenger\": \"some passenger\",\n \"ROLE_place\": \"some place\",\n \"ROLE_placeofemployment\": \"some place of employment\",\n \"ROLE_preventer\": \"some preventer\",\n \"ROLE_prosecutor\": \"some prosecutor\",\n \"ROLE_recipient\": \"some recipient\",\n \"ROLE_rejecternullifier\": \"some rejecter nullifier\",\n \"ROLE_result\": \"some result\",\n \"ROLE_retreater\": \"some retreater\",\n \"ROLE_spy\": \"some spy\",\n \"ROLE_surrenderer\": \"some surrenderer\",\n \"ROLE_target\": \"some target\",\n \"ROLE_territoryorfacility\": \"some territoryor facility\",\n \"ROLE_transporter\": \"some transporter\",\n \"ROLE_vehicle\": \"some vehicle\",\n \"ROLE_victim\": \"some victim\",\n \"ROLE_violator\": \"some violator\",\n \"ROLE_voter\": \"some voter\",\n \"ROLE_yielder\": \"some yielder\", \n },\n \"wikievents\": {\n 'ROLE_AcquiredEntity': 'some acquired entity',\n 'ROLE_Artifact': 'some artifact',\n 'ROLE_ArtifactMoney': 'some artifact money',\n 'ROLE_Attacker': 'some attacker',\n 'ROLE_BodyPart': 'some body part',\n 'ROLE_Communicator': 'some communicator',\n 'ROLE_Components': 'some components',\n 'ROLE_CrashObject': 'some crash object',\n 'ROLE_Damager': 'some damager',\n 
'ROLE_DamagerDestroyer': 'some damager destroyer',\n 'ROLE_Defeated': 'some defeated',\n 'ROLE_Defendant': 'some defendant',\n 'ROLE_Demonstrator': 'some demonstrator',\n 'ROLE_Destination': 'some destination',\n 'ROLE_Destroyer': 'some destroyer',\n 'ROLE_Detainee': 'some detainee',\n 'ROLE_Disabler': 'some disabler',\n 'ROLE_Dismantler': 'some dismantler',\n 'ROLE_Employee': 'some employee',\n 'ROLE_ExplosiveDevice': 'some explosive device',\n 'ROLE_Giver': 'some giver',\n 'ROLE_IdentifiedObject': 'some identified object',\n 'ROLE_IdentifiedRole': 'some identified role',\n 'ROLE_Identifier': 'some identifier',\n 'ROLE_Impeder': 'some impeder',\n 'ROLE_Injurer': 'some injurer',\n 'ROLE_Instrument': 'some instrument',\n 'ROLE_Investigator': 'some investigator',\n 'ROLE_Jailer': 'some jailer',\n 'ROLE_JudgeCourt': 'some judge court',\n 'ROLE_Killer': 'some killer',\n 'ROLE_Learner': 'some learner',\n 'ROLE_ManufacturerAssembler': 'some manufacturer assembler',\n 'ROLE_ObservedEntity': 'some observed entity',\n 'ROLE_Observer': 'some observer',\n 'ROLE_Origin': 'some origin',\n 'ROLE_Participant': 'some participant',\n 'ROLE_PassengerArtifact': 'some passenger artifact',\n 'ROLE_Patient': 'some patient',\n 'ROLE_PaymentBarter': 'some payment barter',\n 'ROLE_Perpetrator': 'some perpetrator',\n 'ROLE_Place': 'some place',\n 'ROLE_PlaceOfEmployment': 'some place of employment',\n 'ROLE_Position': 'some position',\n 'ROLE_Preventer': 'some preventer',\n 'ROLE_Prosecutor': 'some prosecutor',\n 'ROLE_Recipient': 'some recipient',\n 'ROLE_Regulator': 'some regulator',\n 'ROLE_Researcher': 'some researcher',\n 'ROLE_Subject': 'some subject',\n 'ROLE_Target': 'some target',\n 'ROLE_TeacherTrainer': 'some teacher trainer',\n 'ROLE_Topic': 'some topic',\n 'ROLE_Transporter': 'some transporter',\n 'ROLE_Treater': 'some treater',\n 'ROLE_Vehicle': 'some vehicle',\n 'ROLE_Victim': 'some victim',\n 'ROLE_Victor': 'some victor',\n },\n \"phee\": {\n \"ROLE_Combination_Drug\": \"some combination drug\",\n \"ROLE_Effect\": \"some effect\",\n \"ROLE_Subject\": \"some subject\",\n \"ROLE_Subject_Age\": \"some subject age\",\n \"ROLE_Subject_Disorder\": \"some subject disorder\",\n \"ROLE_Subject_Gender\": \"some subject gender\",\n \"ROLE_Subject_Population\": \"some subject population\",\n \"ROLE_Subject_Race\": \"some subject race\",\n \"ROLE_Treatment\": \"some treatment\",\n \"ROLE_Treatment_Disorder\": \"some treatment disorder\",\n \"ROLE_Treatment_Dosage\": \"some treatment dosage\",\n \"ROLE_Treatment_Drug\": \"some treatment drug\",\n \"ROLE_Treatment_Duration\": \"some treatment duration\",\n \"ROLE_Treatment_Freq\": \"some treatment frequency\",\n \"ROLE_Treatment_Route\": \"some treatment route\",\n \"ROLE_Treatment_Time_elapsed\": \"some treatment time elapsed\",\n },\n \"casie\": {\n \"ROLE_Attack-Pattern\": \"some attack pattern\",\n \"ROLE_Attacker\": \"some attacker\",\n \"ROLE_CVE\": \"some cve\",\n \"ROLE_Capabilities\": \"some capabilities\",\n \"ROLE_Compromised-Data\": \"some compromised data\",\n \"ROLE_Damage-Amount\": \"some damage amount\",\n \"ROLE_Discoverer\": \"some discoverer\",\n \"ROLE_Issues-Addressed\": \"some issues addressed\",\n \"ROLE_Number-of-Data\": \"some number of data\",\n \"ROLE_Number-of-Victim\": \"some number of victim\",\n \"ROLE_Patch\": \"some patch\",\n \"ROLE_Patch-Number\": \"some patch number\",\n \"ROLE_Payment-Method\": \"some payment method\",\n \"ROLE_Place\": \"some place\",\n \"ROLE_Price\": \"some price\",\n \"ROLE_Purpose\": \"some purpose\",\n 
\"ROLE_Releaser\": \"some releaser\",\n \"ROLE_Supported_Platform\": \"some supported platform\",\n \"ROLE_Time\": \"some time\",\n \"ROLE_Tool\": \"some tool\",\n \"ROLE_Trusted-Entity\": \"some trusted entity\",\n \"ROLE_Victim\": \"some victim\",\n \"ROLE_Vulnerability\": \"some vulnerability\",\n \"ROLE_Vulnerable_System\": \"some vulnerable system\",\n \"ROLE_Vulnerable_System_Owner\": \"some vulnerable system owner\",\n \"ROLE_Vulnerable_System_Version\": \"some vulnerable system version\",\n },\n \"mlee\": {\n \"ROLE_AtLoc\": \"some at loc\",\n \"ROLE_CSite\": \"some csite\",\n \"ROLE_Cause\": \"some cause\",\n \"ROLE_FromLoc\": \"some from loc\",\n \"ROLE_Instrument\": \"some instrument\",\n \"ROLE_Instrument2\": \"some instrument 2\",\n \"ROLE_Participant\": \"some participant\",\n \"ROLE_Participant2\": \"some participant 2\",\n \"ROLE_Participant3\": \"some participant 3\",\n \"ROLE_Participant4\": \"some participant 4\",\n \"ROLE_Site\": \"some site\",\n \"ROLE_Theme\": \"some theme\",\n \"ROLE_Theme2\": \"some theme 2\",\n \"ROLE_ToLoc\": \"some to loc\",\n },\n \"genia2011\": {\n \"ROLE_AtLoc\": \"some at loc\",\n \"ROLE_CSite\": \"some csite\",\n \"ROLE_Cause\": \"some cause\",\n \"ROLE_Site\": \"some site\",\n \"ROLE_Site2\": \"some site 2\",\n \"ROLE_Theme\": \"some theme\",\n \"ROLE_Theme2\": \"some theme 2\",\n \"ROLE_Theme3\": \"some theme 3\",\n \"ROLE_Theme4\": \"some theme 4\",\n \"ROLE_ToLoc\": \"some to loc\",\n\n },\n \"genia2013\": {\n \"ROLE_CSite\": \"some csite\",\n \"ROLE_Cause\": \"some cause\",\n \"ROLE_Site\": \"some site\",\n \"ROLE_Site2\": \"some site 2\",\n \"ROLE_Theme\": \"some theme\",\n \"ROLE_Theme2\": \"some theme 2\",\n \"ROLE_ToLoc\": \"some to loc\",\n },\n}" } ]
import os, sys, logging, tqdm, pprint import torch import numpy as np import ipdb from collections import namedtuple from transformers import BartTokenizer, AutoTokenizer, get_linear_schedule_with_warmup from torch.utils.data import DataLoader from torch.optim import AdamW from ..trainer import BasicTrainer from .EDmodel import DegreeEDModel from .template_generate import event_template, eve_template_generator from .pattern import patterns, ROLE_PH_MAP from scorer import compute_ED_scores, print_scores
12,307
logger = logging.getLogger(__name__) EDBatch_fields = ['batch_doc_id', 'batch_wnd_id', 'batch_tokens', 'batch_text', 'batch_piece_idxs', 'batch_token_start_idxs', 'batch_input', 'batch_target', 'batch_info'] EDBatch = namedtuple('EDBatch', field_names=EDBatch_fields, defaults=[None] * len(EDBatch_fields)) def ED_collate_fn(batch): return EDBatch( batch_doc_id=[instance["doc_id"] for instance in batch], batch_wnd_id=[instance["wnd_id"] for instance in batch], batch_tokens=[instance["tokens"] for instance in batch], batch_text=[instance["text"] for instance in batch], batch_piece_idxs=[instance["piece_idxs"] for instance in batch], batch_token_start_idxs=[instance["token_start_idxs"] for instance in batch], batch_input=[instance["input"] for instance in batch], batch_target=[instance["target"] for instance in batch], batch_info=[instance["info"] for instance in batch], ) def get_span_idx_tri(pieces, token_start_idxs, span, tokenizer, trigger_span=None): """ This function is how we map the generated prediction back to span prediction. Detailed Explanation: We will first split our prediction and use tokenizer to tokenize our predicted "span" into pieces. Then, we will find whether we can find a continuous span in the original "pieces" can match tokenized "span". If it is an argument/relation extraction task, we will return the one which is closest to the trigger_span. """ words = [] for s in span.split(' '): words.extend(tokenizer.encode(s, add_special_tokens=False)) candidates = [] for i in range(len(pieces)): j = 0 k = 0 while j < len(words) and i+k < len(pieces): if pieces[i+k] == words[j]: j += 1 k += 1 elif tokenizer.decode(words[j]) == "": j += 1 elif tokenizer.decode(pieces[i+k]) == "": k += 1 else: break if j == len(words): candidates.append((i, i+k)) candidates = [(token_start_idxs.index(c1), token_start_idxs.index(c2)) for c1, c2 in candidates if c1 in token_start_idxs and c2 in token_start_idxs] if len(candidates) < 1: return [(-1, -1)] else: if trigger_span is None: return candidates else: return sorted(candidates, key=lambda x: np.abs(trigger_span[0]-x[0])) class DegreeEDTrainer(BasicTrainer): def __init__(self, config, type_set=None): super().__init__(config, type_set) self.tokenizer = None self.model = None def load_model(self, checkpoint=None): if checkpoint: logger.info(f"Loading model from {checkpoint}") state = torch.load(os.path.join(checkpoint, "best_model.state"), map_location=f'cuda:{self.config.gpu_device}') self.tokenizer = state["tokenizer"] self.type_set = state["type_set"] self.model = DegreeEDModel(self.config, self.tokenizer, self.type_set) self.model.load_state_dict(state['model']) self.model.cuda(device=self.config.gpu_device) else: logger.info(f"Loading model from {self.config.pretrained_model_name}") if self.config.pretrained_model_name.startswith('facebook/bart-'): self.tokenizer = BartTokenizer.from_pretrained(self.config.pretrained_model_name, cache_dir=self.config.cache_dir) else: self.tokenizer = AutoTokenizer.from_pretrained(self.config.pretrained_model_name, cache_dir=self.config.cache_dir, use_fast=False) special_tokens = ['<Trigger>', '<sep>', '<None>'] logger.info(f"Add tokens {special_tokens}") self.tokenizer.add_tokens(special_tokens) self.model = DegreeEDModel(self.config, self.tokenizer, self.type_set) self.model.cuda(device=self.config.gpu_device) self.generate_vocab() def generate_vocab(self): event_type_itos = sorted(self.type_set["trigger"]) event_type_stoi = {e: i for i, e in enumerate(event_type_itos)} self.vocab = {"event_type_itos": 
event_type_itos, "event_type_stoi": event_type_stoi, } def process_data_for_training(self, data): assert self.tokenizer, "Please load model and tokenizer before processing data!" logger.info("Processing data...") n_total = 0 new_data = [] for dt in tqdm.tqdm(data, ncols=100): n_total += 1 _triggers = [t[:3] for t in dt["triggers"]]
logger = logging.getLogger(__name__) EDBatch_fields = ['batch_doc_id', 'batch_wnd_id', 'batch_tokens', 'batch_text', 'batch_piece_idxs', 'batch_token_start_idxs', 'batch_input', 'batch_target', 'batch_info'] EDBatch = namedtuple('EDBatch', field_names=EDBatch_fields, defaults=[None] * len(EDBatch_fields)) def ED_collate_fn(batch): return EDBatch( batch_doc_id=[instance["doc_id"] for instance in batch], batch_wnd_id=[instance["wnd_id"] for instance in batch], batch_tokens=[instance["tokens"] for instance in batch], batch_text=[instance["text"] for instance in batch], batch_piece_idxs=[instance["piece_idxs"] for instance in batch], batch_token_start_idxs=[instance["token_start_idxs"] for instance in batch], batch_input=[instance["input"] for instance in batch], batch_target=[instance["target"] for instance in batch], batch_info=[instance["info"] for instance in batch], ) def get_span_idx_tri(pieces, token_start_idxs, span, tokenizer, trigger_span=None): """ This function is how we map the generated prediction back to span prediction. Detailed Explanation: We will first split our prediction and use tokenizer to tokenize our predicted "span" into pieces. Then, we will find whether we can find a continuous span in the original "pieces" can match tokenized "span". If it is an argument/relation extraction task, we will return the one which is closest to the trigger_span. """ words = [] for s in span.split(' '): words.extend(tokenizer.encode(s, add_special_tokens=False)) candidates = [] for i in range(len(pieces)): j = 0 k = 0 while j < len(words) and i+k < len(pieces): if pieces[i+k] == words[j]: j += 1 k += 1 elif tokenizer.decode(words[j]) == "": j += 1 elif tokenizer.decode(pieces[i+k]) == "": k += 1 else: break if j == len(words): candidates.append((i, i+k)) candidates = [(token_start_idxs.index(c1), token_start_idxs.index(c2)) for c1, c2 in candidates if c1 in token_start_idxs and c2 in token_start_idxs] if len(candidates) < 1: return [(-1, -1)] else: if trigger_span is None: return candidates else: return sorted(candidates, key=lambda x: np.abs(trigger_span[0]-x[0])) class DegreeEDTrainer(BasicTrainer): def __init__(self, config, type_set=None): super().__init__(config, type_set) self.tokenizer = None self.model = None def load_model(self, checkpoint=None): if checkpoint: logger.info(f"Loading model from {checkpoint}") state = torch.load(os.path.join(checkpoint, "best_model.state"), map_location=f'cuda:{self.config.gpu_device}') self.tokenizer = state["tokenizer"] self.type_set = state["type_set"] self.model = DegreeEDModel(self.config, self.tokenizer, self.type_set) self.model.load_state_dict(state['model']) self.model.cuda(device=self.config.gpu_device) else: logger.info(f"Loading model from {self.config.pretrained_model_name}") if self.config.pretrained_model_name.startswith('facebook/bart-'): self.tokenizer = BartTokenizer.from_pretrained(self.config.pretrained_model_name, cache_dir=self.config.cache_dir) else: self.tokenizer = AutoTokenizer.from_pretrained(self.config.pretrained_model_name, cache_dir=self.config.cache_dir, use_fast=False) special_tokens = ['<Trigger>', '<sep>', '<None>'] logger.info(f"Add tokens {special_tokens}") self.tokenizer.add_tokens(special_tokens) self.model = DegreeEDModel(self.config, self.tokenizer, self.type_set) self.model.cuda(device=self.config.gpu_device) self.generate_vocab() def generate_vocab(self): event_type_itos = sorted(self.type_set["trigger"]) event_type_stoi = {e: i for i, e in enumerate(event_type_itos)} self.vocab = {"event_type_itos": 
event_type_itos, "event_type_stoi": event_type_stoi, } def process_data_for_training(self, data): assert self.tokenizer, "Please load model and tokneizer before processing data!" logger.info("Processing data...") n_total = 0 new_data = [] for dt in tqdm.tqdm(data, ncols=100): n_total += 1 _triggers = [t[:3] for t in dt["triggers"]]
event_template = eve_template_generator(self.config.dataset, dt["tokens"], _triggers, [], self.config.input_style, self.config.output_style, self.vocab, True)
2
2023-11-15 21:32:56+00:00
16k
ahayler/s4c
scripts/videos/gen_vid_nvs.py
[ { "identifier": "BTSNet", "path": "models/bts/model/models_bts.py", "snippet": "class BTSNet(torch.nn.Module):\n def __init__(self, conf):\n super().__init__()\n\n self.d_min = conf.get(\"z_near\")\n self.d_max = conf.get(\"z_far\")\n\n self.learn_empty = conf.get(\"learn_empty\", True)\n self.empty_empty = conf.get(\"empty_empty\", False)\n self.inv_z = conf.get(\"inv_z\", True)\n\n self.color_interpolation = conf.get(\"color_interpolation\", \"bilinear\")\n self.code_mode = conf.get(\"code_mode\", \"z\")\n if self.code_mode not in [\"z\", \"distance\"]:\n raise NotImplementedError(f\"Unknown mode for positional encoding: {self.code_mode}\")\n\n self.encoder = make_backbone(conf[\"encoder\"])\n self.code_xyz = PositionalEncoding.from_conf(conf[\"code\"], d_in=3)\n\n self.flip_augmentation = conf.get(\"flip_augmentation\", False)\n\n self.return_sample_depth = conf.get(\"return_sample_depth\", False)\n\n self.sample_color = conf.get(\"sample_color\", True)\n\n d_in = self.encoder.latent_size + self.code_xyz.d_out\n d_out = 1 if self.sample_color else 4\n\n self._d_in = d_in\n self._d_out = d_out\n\n self.mlp_coarse = make_mlp(conf[\"mlp_coarse\"], d_in, d_out=d_out)\n self.mlp_fine = make_mlp(conf[\"mlp_fine\"], d_in, d_out=d_out, allow_empty=True)\n\n # MLP for segmentation classes\n # TODO: Find the output dimensions automatically\n self.segmentation_mode = conf.get('segmentation_mode', None)\n if self.segmentation_mode == 'KITTI-360':\n self.mlp_segmentation = make_mlp(conf[\"mlp_coarse\"], d_in, d_out=21)\n # self.mlp_segmentation = make_segnet(d_in=d_in, d_out=21, d_hidden_list=[64])\n elif self.segmentation_mode == 'panoptic_deeplab':\n # self.mlp_segmentation = make_mlp(conf[\"mlp_coarse\"], d_in, d_out=19)\n self.mlp_segmentation = make_segnet(d_in=d_in, d_out=19, d_hidden_list=[64])\n # self.mlp_segmentation = make_intercept_model(d_in, d_out=21)\n\n if self.learn_empty:\n self.empty_feature = nn.Parameter(torch.randn((self.encoder.latent_size,), requires_grad=True))\n\n self._scale = 0\n\n def set_scale(self, scale):\n self._scale = scale\n\n def get_scale(self):\n return self._scale\n\n def compute_grid_transforms(self, *args, **kwargs):\n pass\n\n def encode(self, images, Ks, poses_c2w, ids_encoder=None, ids_render=None, images_alt=None, combine_ids=None):\n poses_w2c = torch.inverse(poses_c2w)\n\n if ids_encoder is None:\n images_encoder = images\n Ks_encoder = Ks\n poses_w2c_encoder = poses_w2c\n ids_encoder = list(range(len(images)))\n else:\n images_encoder = images[:, ids_encoder]\n Ks_encoder = Ks[:, ids_encoder]\n poses_w2c_encoder = poses_w2c[:, ids_encoder]\n\n if images_alt is not None:\n images = images_alt\n else:\n images = images * .5 + .5\n\n if ids_render is None:\n images_render = images\n Ks_render = Ks\n poses_w2c_render = poses_w2c\n ids_render = list(range(len(images)))\n else:\n images_render = images[:, ids_render]\n Ks_render = Ks[:, ids_render]\n poses_w2c_render = poses_w2c[:, ids_render]\n\n if combine_ids is not None:\n combine_ids = list(list(group) for group in combine_ids)\n get_combined = set(sum(combine_ids, []))\n for i in range(images.shape[1]):\n if i not in get_combined:\n combine_ids.append((i,))\n remap_encoder = {v: i for i, v in enumerate(ids_encoder)}\n remap_render = {v: i for i, v in enumerate(ids_render)}\n comb_encoder = [[remap_encoder[i] for i in group if i in ids_encoder] for group in combine_ids]\n comb_render = [[remap_render[i] for i in group if i in ids_render] for group in combine_ids]\n comb_encoder = [group for group in 
comb_encoder if len(group) > 0]\n comb_render = [group for group in comb_render if len(group) > 0]\n else:\n comb_encoder = None\n comb_render = None\n\n n, nv, c, h, w = images_encoder.shape\n c_l = self.encoder.latent_size\n\n if self.flip_augmentation and self.training:\n do_flip = (torch.rand(1) > .5).item()\n else:\n do_flip = False\n\n if do_flip:\n images_encoder = torch.flip(images_encoder, dims=(-1, ))\n\n image_latents_ms = self.encoder(images_encoder.view(n * nv, c, h, w))\n\n if do_flip:\n image_latents_ms = [torch.flip(il, dims=(-1, )) for il in image_latents_ms]\n\n _, _, h_, w_ = image_latents_ms[0].shape\n image_latents_ms = [F.interpolate(image_latents, (h_, w_)).view(n, nv, c_l, h_, w_) for image_latents in image_latents_ms]\n\n if torch.any(torch.isnan(torch.stack(image_latents_ms))):\n self.encoder(images_encoder.view(n * nv, c, h, w))\n # raise Exception(\"NaN in encoded features.\")\n\n self.grid_f_features = image_latents_ms\n self.grid_f_Ks = Ks_encoder\n self.grid_f_poses_w2c = poses_w2c_encoder\n self.grid_f_combine = comb_encoder\n\n self.grid_c_imgs = images_render\n self.grid_c_Ks = Ks_render\n self.grid_c_poses_w2c = poses_w2c_render\n self.grid_c_combine = comb_render\n\n def sample_features(self, xyz, use_single_featuremap=True):\n n, n_pts, _ = xyz.shape\n n, nv, c, h, w = self.grid_f_features[self._scale].shape\n\n # if use_single_featuremap:\n # nv = 1\n\n xyz = xyz.unsqueeze(1) # (n, 1, pts, 3)\n ones = torch.ones_like(xyz[..., :1])\n xyz = torch.cat((xyz, ones), dim=-1)\n xyz_projected = ((self.grid_f_poses_w2c[:, :nv, :3, :]) @ xyz.permute(0, 1, 3, 2))\n distance = torch.norm(xyz_projected, dim=-2).unsqueeze(-1)\n xyz_projected = (self.grid_f_Ks[:, :nv] @ xyz_projected).permute(0, 1, 3, 2)\n xy = xyz_projected[:, :, :, [0, 1]]\n z = xyz_projected[:, :, :, 2:3]\n\n xy = xy / z.clamp_min(EPS)\n invalid = (z <= EPS) | (xy[:, :, :, :1] < -1) | (xy[:, :, :, :1] > 1) | (xy[:, :, :, 1:2] < -1) | (xy[:, :, :, 1:2] > 1)\n\n if self.code_mode == \"z\":\n # Get z into [-1, 1] range\n if self.inv_z:\n z = (1 / z.clamp_min(EPS) - 1 / self.d_max) / (1 / self.d_min - 1 / self.d_max)\n else:\n z = (z - self.d_min) / (self.d_max - self.d_min)\n z = 2 * z - 1\n xyz_projected = torch.cat((xy, z), dim=-1)\n elif self.code_mode == \"distance\":\n if self.inv_z:\n distance = (1 / distance.clamp_min(EPS) - 1 / self.d_max) / (1 / self.d_min - 1 / self.d_max)\n else:\n distance = (distance - self.d_min) / (self.d_max - self.d_min)\n distance = 2 * distance - 1\n xyz_projected = torch.cat((xy, distance), dim=-1)\n xyz_code = self.code_xyz(xyz_projected.view(n * nv * n_pts, -1)).view(n, nv, n_pts, -1)\n\n feature_map = self.grid_f_features[self._scale][:, :nv]\n # These samples are from different scales\n if self.learn_empty:\n empty_feature_expanded = self.empty_feature.view(1, 1, 1, c).expand(n, nv, n_pts, c)\n\n sampled_features = F.grid_sample(feature_map.view(n * nv, c, h, w), xy.view(n * nv, 1, -1, 2), mode=\"bilinear\", padding_mode=\"border\", align_corners=False).view(n, nv, c, n_pts).permute(0, 1, 3, 2)\n\n if self.learn_empty:\n sampled_features[invalid.expand(-1, -1, -1, c)] = empty_feature_expanded[invalid.expand(-1, -1, -1, c)]\n\n sampled_features = torch.cat((sampled_features, xyz_code), dim=-1)\n\n # If there are multiple frames with predictions, reduce them.\n # TODO: Technically, this implementations should be improved if we use multiple frames.\n # The reduction should only happen after we perform the unprojection.\n\n if self.grid_f_combine is not None:\n 
invalid_groups = []\n sampled_features_groups = []\n\n for group in self.grid_f_combine:\n if len(group) == 1:\n invalid_groups.append(invalid[:, group])\n sampled_features_groups.append(sampled_features[:, group])\n\n invalid_to_combine = invalid[:, group]\n features_to_combine = sampled_features[:, group]\n\n indices = torch.min(invalid_to_combine, dim=1, keepdim=True)[1]\n invalid_picked = torch.gather(invalid_to_combine, dim=1, index=indices)\n features_picked = torch.gather(features_to_combine, dim=1, index=indices.expand(-1, -1, -1, features_to_combine.shape[-1]))\n\n invalid_groups.append(invalid_picked)\n sampled_features_groups.append(features_picked)\n\n invalid = torch.cat(invalid_groups, dim=1)\n sampled_features = torch.cat(sampled_features_groups, dim=1)\n\n if use_single_featuremap:\n sampled_features = sampled_features.mean(dim=1)\n invalid = torch.any(invalid, dim=1)\n\n return sampled_features, invalid\n\n def sample_colors(self, xyz):\n n, n_pts, _ = xyz.shape\n n, nv, c, h, w = self.grid_c_imgs.shape\n xyz = xyz.unsqueeze(1) # (n, 1, pts, 3)\n ones = torch.ones_like(xyz[..., :1])\n xyz = torch.cat((xyz, ones), dim=-1)\n xyz_projected = ((self.grid_c_poses_w2c[:, :, :3, :]) @ xyz.permute(0, 1, 3, 2))\n distance = torch.norm(xyz_projected, dim=-2).unsqueeze(-1)\n xyz_projected = (self.grid_c_Ks @ xyz_projected).permute(0, 1, 3, 2)\n xy = xyz_projected[:, :, :, [0, 1]]\n z = xyz_projected[:, :, :, 2:3]\n\n # This scales the x-axis into the right range.\n xy = xy / z.clamp_min(EPS)\n invalid = (z <= EPS) | (xy[:, :, :, :1] < -1) | (xy[:, :, :, :1] > 1) | (xy[:, :, :, 1:2] < -1) | (xy[:, :, :, 1:2] > 1)\n\n sampled_colors = F.grid_sample(self.grid_c_imgs.view(n * nv, c, h, w), xy.view(n * nv, 1, -1, 2), mode=self.color_interpolation, padding_mode=\"border\", align_corners=False).view(n, nv, c, n_pts).permute(0, 1, 3, 2)\n assert not torch.any(torch.isnan(sampled_colors))\n\n if self.grid_c_combine is not None:\n invalid_groups = []\n sampled_colors_groups = []\n\n for group in self.grid_c_combine:\n if len(group) == 1:\n invalid_groups.append(invalid[:, group])\n sampled_colors_groups.append(sampled_colors[:, group])\n continue\n\n invalid_to_combine = invalid[:, group]\n colors_to_combine = sampled_colors[:, group]\n\n indices = torch.min(invalid_to_combine, dim=1, keepdim=True)[1]\n invalid_picked = torch.gather(invalid_to_combine, dim=1, index=indices)\n colors_picked = torch.gather(colors_to_combine, dim=1, index=indices.expand(-1, -1, -1, colors_to_combine.shape[-1]))\n\n invalid_groups.append(invalid_picked)\n sampled_colors_groups.append(colors_picked)\n\n invalid = torch.cat(invalid_groups, dim=1)\n sampled_colors = torch.cat(sampled_colors_groups, dim=1)\n\n if self.return_sample_depth:\n distance = distance.view(n, nv, n_pts, 1)\n sampled_colors = torch.cat((sampled_colors, distance), dim=-1)\n\n return sampled_colors, invalid\n\n def forward(self, xyz, coarse=True, viewdirs=None, far=False, only_density=False, predict_segmentation=False):\n \"\"\"\n Predict (r, g, b, sigma) at world space points xyz.\n Please call encode first!\n :param xyz (B, 3)\n B is batch of points (in rays)\n :param predict_segmentation, if true also return the segmentation distribution for all the points\n :return (B, 4) r g b sigma\n \"\"\"\n\n with profiler.record_function(\"model_inference\"):\n n, n_pts, _ = xyz.shape\n nv = self.grid_c_imgs.shape[1]\n\n if self.grid_c_combine is not None:\n nv = len(self.grid_c_combine)\n\n # Sampled features all has shape: scales [n, n_pts, c + 
xyz_code]\n sampled_features, invalid_features = self.sample_features(xyz, use_single_featuremap=not only_density) # invalid features (n, n_pts, 1)\n sampled_features = sampled_features.reshape(n * n_pts, -1)\n\n mlp_input = sampled_features.view(n, n_pts, -1)\n\n # Camera frustum culling stuff, currently disabled\n combine_index = None\n dim_size = None\n\n # Run main NeRF network\n if coarse or self.mlp_fine is None:\n mlp_output = self.mlp_coarse(\n mlp_input,\n combine_inner_dims=(n_pts,),\n combine_index=combine_index,\n dim_size=dim_size,\n )\n else:\n mlp_output = self.mlp_fine(\n mlp_input,\n combine_inner_dims=(n_pts,),\n combine_index=combine_index,\n dim_size=dim_size,\n )\n\n segs = None\n if predict_segmentation:\n segs = self.mlp_segmentation(mlp_input)\n # print(next(self.mlp_segmentation.parameters()))\n # softmax to get a class distribution\n segs = F.softmax(segs, dim=2)\n # (n, pts, c) -> (n, n_pts, c)\n mlp_output = mlp_output.reshape(n, n_pts, self._d_out)\n\n if self.sample_color:\n sigma = mlp_output[..., :1]\n sigma = F.softplus(sigma)\n rgb, invalid_colors = self.sample_colors(xyz) # (n, nv, pts, 3)\n else:\n sigma = mlp_output[..., :1]\n sigma = F.relu(sigma)\n rgb = mlp_output[..., 1:4].reshape(n, 1, n_pts, 3)\n rgb = F.sigmoid(rgb)\n invalid_colors = invalid_features.unsqueeze(-2)\n nv = 1\n\n if self.empty_empty:\n sigma[invalid_features[..., 0]] = 0\n # TODO: Think about this!\n # Since we don't train the colors directly, lets use softplus instead of relu\n\n if not only_density:\n _, _, _, c = rgb.shape\n rgb = rgb.permute(0, 2, 1, 3).reshape(n, n_pts, nv * c) # (n, pts, nv * 3)\n invalid_colors = invalid_colors.permute(0, 2, 1, 3).reshape(n, n_pts, nv)\n\n invalid = invalid_colors | invalid_features # Invalid features gets broadcasted to (n, n_pts, nv)\n invalid = invalid.to(rgb.dtype)\n else:\n rgb = torch.zeros((n, n_pts, nv * 3), device=sigma.device)\n invalid = invalid_features.to(sigma.dtype)\n\n if predict_segmentation:\n return rgb, invalid, sigma, segs\n else:\n return rgb, invalid, sigma" }, { "identifier": "ImageRaySampler", "path": "models/bts/model/ray_sampler.py", "snippet": "class ImageRaySampler(RaySampler):\n def __init__(self, z_near, z_far, height=None, width=None, channels=3, norm_dir=True):\n self.z_near = z_near\n self.z_far = z_far\n self.height = height\n self.width = width\n self.channels = channels\n self.norm_dir = norm_dir\n\n def sample(self, images, poses, projs, segs=None, sample_segs=False):\n n, v, _, _ = poses.shape\n\n if self.height is None:\n self.height, self.width = images.shape[-2:]\n\n all_rgb_gt = []\n all_rays = []\n all_segs_gt = []\n\n for n_ in range(n):\n focals = projs[n_, :, [0, 1], [0, 1]]\n centers = projs[n_, :, [0, 1], [2, 2]]\n\n rays = util.gen_rays(poses[n_].view(-1, 4, 4), self.width, self.height, focal=focals, c=centers, z_near=self.z_near, z_far=self.z_far, norm_dir=self.norm_dir).view(-1, 8)\n all_rays.append(rays)\n\n if images is not None:\n rgb_gt = images[n_].view(-1, self.channels, self.height, self.width)\n rgb_gt = (rgb_gt.permute(0, 2, 3, 1).contiguous().reshape(-1, self.channels))\n all_rgb_gt.append(rgb_gt)\n\n if sample_segs:\n segs_gt = segs[n_].view(-1, 1, self.height, self.width)\n segs_gt = (segs_gt.permute(0, 2, 3, 1).contiguous().reshape(-1, 1))\n all_segs_gt.append(segs_gt)\n\n all_rays = torch.stack(all_rays)\n if images is not None:\n all_rgb_gt = torch.stack(all_rgb_gt)\n else:\n all_rgb_gt = None\n\n if sample_segs:\n all_segs_gt = torch.stack(all_segs_gt)\n # the None accounts 
for the patch_to_image\n return all_rays, all_rgb_gt, all_segs_gt, None\n else:\n return all_rays, all_rgb_gt\n\n def reconstruct(self, render_dict, channels=None, reconstruct_segmentation=False):\n coarse = render_dict[\"coarse\"]\n fine = render_dict[\"fine\"]\n\n if channels is None:\n channels = self.channels\n\n if reconstruct_segmentation:\n c_segmentation = coarse[\"segs\"]\n # c_segmentation_raw = coarse[\"segs_raw\"]\n n_classes = c_segmentation.shape[-1]\n # n_samples = c_segmentation_raw.shape[-2]\n\n c_rgb = coarse[\"rgb\"] # n, n_pts, v * 3\n c_weights = coarse[\"weights\"]\n c_depth = coarse[\"depth\"]\n c_invalid = coarse[\"invalid\"]\n\n f_rgb = fine[\"rgb\"] # n, n_pts, v * 3\n f_weights = fine[\"weights\"]\n f_depth = fine[\"depth\"]\n f_invalid = fine[\"invalid\"]\n\n n, n_pts, v_c = c_rgb.shape\n v_in = n_pts // (self.height * self.width)\n v_render = v_c // channels\n c_n_smps = c_weights.shape[-1]\n f_n_smps = f_weights.shape[-1]\n # (This can be a different v from the sample method)\n\n if reconstruct_segmentation:\n coarse[\"segs\"] = c_segmentation.view(n, v_in, self.height, self.width, n_classes)\n # coarse[\"segs_raw\"] = c_segmentation_raw.view(n, v_in, self.height, self.width, n_samples, n_classes)\n\n coarse[\"rgb\"] = c_rgb.view(n, v_in, self.height, self.width, v_render, channels)\n coarse[\"weights\"] = c_weights.view(n, v_in, self.height, self.width, c_n_smps)\n coarse[\"depth\"] = c_depth.view(n, v_in, self.height, self.width)\n coarse[\"invalid\"] = c_invalid.view(n, v_in, self.height, self.width, c_n_smps, v_render)\n\n fine[\"rgb\"] = f_rgb.view(n, v_in, self.height, self.width, v_render, channels)\n fine[\"weights\"] = f_weights.view(n, v_in, self.height, self.width, f_n_smps)\n fine[\"depth\"] = f_depth.view(n, v_in, self.height, self.width)\n fine[\"invalid\"] = f_invalid.view(n, v_in, self.height, self.width, f_n_smps, v_render)\n\n if \"alphas\" in coarse:\n c_alphas = coarse[\"alphas\"]\n f_alphas = fine[\"alphas\"]\n coarse[\"alphas\"] = c_alphas.view(n, v_in, self.height, self.width, c_n_smps)\n fine[\"alphas\"] = f_alphas.view(n, v_in, self.height, self.width, f_n_smps)\n\n if \"z_samps\" in coarse:\n c_z_samps = coarse[\"z_samps\"]\n f_z_samps = fine[\"z_samps\"]\n coarse[\"z_samps\"] = c_z_samps.view(n, v_in, self.height, self.width, c_n_smps)\n fine[\"z_samps\"] = f_z_samps.view(n, v_in, self.height, self.width, f_n_smps)\n\n if \"rgb_samps\" in coarse:\n c_rgb_samps = coarse[\"rgb_samps\"]\n f_rgb_samps = fine[\"rgb_samps\"]\n coarse[\"rgb_samps\"] = c_rgb_samps.view(n, v_in, self.height, self.width, c_n_smps, v_render, channels)\n fine[\"rgb_samps\"] = f_rgb_samps.view(n, v_in, self.height, self.width, f_n_smps, v_render, channels)\n\n render_dict[\"coarse\"] = coarse\n render_dict[\"fine\"] = fine\n\n if \"rgb_gt\" in render_dict:\n rgb_gt = render_dict[\"rgb_gt\"]\n render_dict[\"rgb_gt\"] = rgb_gt.view(n, v_in, self.height, self.width, channels)\n\n return render_dict" }, { "identifier": "NeRFRenderer", "path": "models/common/render/nerf.py", "snippet": "class NeRFRenderer(torch.nn.Module):\n \"\"\"\n NeRF differentiable renderer\n :param n_coarse number of coarse (binned uniform) samples\n :param n_fine number of fine (importance) samples\n :param n_fine_depth number of expected depth samples\n :param noise_std noise to add to sigma. 
We do not use it\n :param depth_std noise for depth samples\n :param eval_batch_size ray batch size for evaluation\n :param white_bkgd if true, background color is white; else black\n :param lindisp if to use samples linear in disparity instead of distance\n :param sched ray sampling schedule. list containing 3 lists of equal length.\n sched[0] is list of iteration numbers,\n sched[1] is list of coarse sample numbers,\n sched[2] is list of fine sample numbers\n \"\"\"\n\n def __init__(\n self,\n n_coarse=128,\n n_fine=0,\n n_fine_depth=0,\n noise_std=0.0,\n depth_std=0.01,\n eval_batch_size=100000,\n white_bkgd=False,\n lindisp=False,\n sched=None, # ray sampling schedule for coarse and fine rays\n hard_alpha_cap=False\n ):\n super().__init__()\n self.n_coarse = n_coarse\n self.n_fine = n_fine\n self.n_fine_depth = n_fine_depth\n\n self.noise_std = noise_std\n self.depth_std = depth_std\n\n self.eval_batch_size = eval_batch_size\n self.white_bkgd = white_bkgd\n self.lindisp = lindisp\n if lindisp:\n print(\"Using linear displacement rays\")\n self.using_fine = n_fine > 0\n self.sched = sched\n if sched is not None and len(sched) == 0:\n self.sched = None\n self.register_buffer(\n \"iter_idx\", torch.tensor(0, dtype=torch.long), persistent=True\n )\n self.register_buffer(\n \"last_sched\", torch.tensor(0, dtype=torch.long), persistent=True\n )\n self.hard_alpha_cap = hard_alpha_cap\n\n def sample_coarse(self, rays):\n \"\"\"\n Stratified sampling. Note this is different from original NeRF slightly.\n :param rays ray [origins (3), directions (3), near (1), far (1)] (B, 8)\n :return (B, Kc)\n \"\"\"\n device = rays.device\n near, far = rays[:, -2:-1], rays[:, -1:] # (B, 1)\n\n step = 1.0 / self.n_coarse\n B = rays.shape[0]\n z_steps = torch.linspace(0, 1 - step, self.n_coarse, device=device) # (Kc)\n z_steps = z_steps.unsqueeze(0).repeat(B, 1) # (B, Kc)\n z_steps += torch.rand_like(z_steps) * step\n if not self.lindisp: # Use linear sampling in depth space\n return near * (1 - z_steps) + far * z_steps # (B, Kf)\n else: # Use linear sampling in disparity space\n return 1 / (1 / near * (1 - z_steps) + 1 / far * z_steps) # (B, Kf)\n\n # Use linear sampling in depth space\n return near * (1 - z_steps) + far * z_steps # (B, Kc)\n\n def sample_coarse_from_dist(self, rays, weights, z_samp):\n device = rays.device\n B = rays.shape[0]\n\n num_bins = weights.shape[-1]\n num_samples = self.n_coarse\n\n weights = weights.detach() + 1e-5 # Prevent division by zero\n pdf = weights / torch.sum(weights, -1, keepdim=True) # (B, Kc)\n cdf = torch.cumsum(pdf, -1) # (B, Kc)\n cdf = torch.cat([torch.zeros_like(cdf[:, :1]), cdf], -1) # (B, Kc+1)\n\n u = torch.rand(B, num_samples, dtype=torch.float32, device=device) # (B, Kf)\n interval_ids = torch.searchsorted(cdf, u, right=True) - 1 # (B, Kf)\n interval_ids = torch.clamp(interval_ids, 0, num_samples-1)\n interval_interp = torch.rand_like(interval_ids, dtype=torch.float32)\n\n # z_samps describe the centers of the respective histogram bins. 
Therefore, we have to extend them to the left and right\n if self.lindisp:\n z_samp = 1 / z_samp\n\n centers = .5 * (z_samp[:, 1:] + z_samp[:, :-1])\n interval_borders = torch.cat((z_samp[:, :1], centers, z_samp[:, -1:]), dim=-1)\n\n left_border = torch.gather(interval_borders, dim=-1, index=interval_ids)\n right_border = torch.gather(interval_borders, dim=-1, index=interval_ids+1)\n\n z_samp_new = left_border * (1 - interval_interp) + right_border * interval_interp\n\n if self.lindisp:\n z_samp_new = 1 / z_samp_new\n\n assert not torch.any(torch.isnan(z_samp_new))\n\n return z_samp_new\n\n def sample_fine(self, rays, weights):\n \"\"\"min\n Weighted stratified (importance) sample\n :param rays ray [origins (3), directions (3), near (1), far (1)] (B, 8)\n :param weights (B, Kc)\n :return (B, Kf-Kfd)\n \"\"\"\n device = rays.device\n B = rays.shape[0]\n\n weights = weights.detach() + 1e-5 # Prevent division by zero\n pdf = weights / torch.sum(weights, -1, keepdim=True) # (B, Kc)\n cdf = torch.cumsum(pdf, -1) # (B, Kc)\n cdf = torch.cat([torch.zeros_like(cdf[:, :1]), cdf], -1) # (B, Kc+1)\n\n u = torch.rand(\n B, self.n_fine - self.n_fine_depth, dtype=torch.float32, device=device\n ) # (B, Kf)\n inds = torch.searchsorted(cdf, u, right=True).float() - 1.0 # (B, Kf)\n inds = torch.clamp_min(inds, 0.0)\n\n z_steps = (inds + torch.rand_like(inds)) / self.n_coarse # (B, Kf)\n\n near, far = rays[:, -2:-1], rays[:, -1:] # (B, 1)\n if not self.lindisp: # Use linear sampling in depth space\n z_samp = near * (1 - z_steps) + far * z_steps # (B, Kf)\n else: # Use linear sampling in disparity space\n z_samp = 1 / (1 / near * (1 - z_steps) + 1 / far * z_steps) # (B, Kf)\n\n assert not torch.any(torch.isnan(z_samp))\n\n return z_samp\n\n def sample_fine_depth(self, rays, depth):\n \"\"\"\n Sample around specified depth\n :param rays ray [origins (3), directions (3), near (1), far (1)] (B, 8)\n :param depth (B)\n :return (B, Kfd)\n \"\"\"\n z_samp = depth.unsqueeze(1).repeat((1, self.n_fine_depth))\n z_samp += torch.randn_like(z_samp) * self.depth_std\n # Clamp does not support tensor bounds\n z_samp = torch.max(torch.min(z_samp, rays[:, -1:]), rays[:, -2:-1])\n\n assert not torch.any(torch.isnan(z_samp))\n\n return z_samp\n\n def composite(self, model, rays, z_samp, coarse=True, sb=0, predict_segmentation=False):\n \"\"\"\n Render RGB and depth for each ray using NeRF alpha-compositing formula,\n given sampled positions along each ray (see sample_*)\n :param model should return (B, (r, g, b, sigma)) when called with (B, (x, y, z))\n should also support 'coarse' boolean argument\n :param rays ray [origins (3), directions (3), near (1), far (1)] (B, 8)\n :param z_samp z positions sampled for each ray (B, K)\n :param coarse whether to evaluate using coarse NeRF\n :param predict_segmentation if true also predict the semantic distribution\n :param sb super-batch dimension; 0 = disable\n :return weights (B, K), rgb (B, 3), depth (B)\n \"\"\"\n with profiler.record_function(\"renderer_composite\"):\n B, K = z_samp.shape\n\n deltas = z_samp[:, 1:] - z_samp[:, :-1] # (B, K-1)\n delta_inf = 1e10 * torch.ones_like(deltas[:, :1]) # infty (B, 1)\n # delta_inf = rays[:, -1:] - z_samp[:, -1:]\n deltas = torch.cat([deltas, delta_inf], -1) # (B, K)\n\n # (B, K, 3)\n points = rays[:, None, :3] + z_samp.unsqueeze(2) * rays[:, None, 3:6]\n points = points.reshape(-1, 3) # (B*K, 3)\n\n use_viewdirs = hasattr(model, \"use_viewdirs\") and model.use_viewdirs\n\n rgbs_all, invalid_all, sigmas_all, segs_all = [], [], [], []\n if sb 
> 0:\n points = points.reshape(\n sb, -1, 3\n ) # (SB, B'*K, 3) B' is real ray batch size\n eval_batch_size = (self.eval_batch_size - 1) // sb + 1\n eval_batch_dim = 1\n else:\n eval_batch_size = self.eval_batch_size\n eval_batch_dim = 0\n\n split_points = torch.split(points, eval_batch_size, dim=eval_batch_dim)\n if use_viewdirs:\n dim1 = K\n viewdirs = rays[:, None, 3:6].expand(-1, dim1, -1) # (B, K, 3)\n if sb > 0:\n viewdirs = viewdirs.reshape(sb, -1, 3) # (SB, B'*K, 3)\n else:\n viewdirs = viewdirs.reshape(-1, 3) # (B*K, 3)\n split_viewdirs = torch.split(\n viewdirs, eval_batch_size, dim=eval_batch_dim\n )\n for pnts, dirs in zip(split_points, split_viewdirs):\n rgbs, invalid, sigmas = model(pnts, coarse=coarse, viewdirs=dirs)\n rgbs_all.append(rgbs)\n invalid_all.append(invalid)\n sigmas_all.append(sigmas)\n else:\n for pnts in split_points:\n if predict_segmentation:\n rgbs, invalid, sigmas, segs = model(pnts, coarse=coarse,\n predict_segmentation=predict_segmentation)\n segs_all.append(segs)\n else:\n rgbs, invalid, sigmas = model(pnts, coarse=coarse,\n predict_segmentation=predict_segmentation)\n rgbs_all.append(rgbs)\n invalid_all.append(invalid)\n sigmas_all.append(sigmas)\n points = None\n viewdirs = None\n # (B*K, 4) OR (SB, B'*K, 4)\n rgbs = torch.cat(rgbs_all, dim=eval_batch_dim)\n invalid = torch.cat(invalid_all, dim=eval_batch_dim)\n sigmas = torch.cat(sigmas_all, dim=eval_batch_dim)\n\n if predict_segmentation:\n segs = torch.cat(segs_all, dim=eval_batch_dim)\n segs = segs.reshape(B, K, -1) # (B, K, n_classes)\n\n rgbs = rgbs.reshape(B, K, -1) # (B, K, 4 or 5)\n invalid = invalid.reshape(B, K, -1)\n sigmas = sigmas.reshape(B, K)\n\n if self.training and self.noise_std > 0.0:\n sigmas = sigmas + torch.randn_like(sigmas) * self.noise_std\n\n alphas = 1 - torch.exp(-deltas.abs() * torch.relu(sigmas)) # (B, K) (delta should be positive anyways)\n\n if self.hard_alpha_cap:\n alphas[:, -1] = 1\n\n deltas = None\n sigmas = None\n alphas_shifted = torch.cat(\n [torch.ones_like(alphas[:, :1]), 1 - alphas + 1e-10], -1\n ) # (B, K+1) = [1, a1, a2, ...]\n T = torch.cumprod(alphas_shifted, -1) # (B)\n weights = alphas * T[:, :-1] # (B, K)\n # alphas = None\n alphas_shifted = None\n\n rgb_final = torch.sum(weights.unsqueeze(-1) * rgbs, -2) # (B, 3)\n depth_final = torch.sum(weights * z_samp, -1) # (B)\n\n\n\n if self.white_bkgd:\n # White background\n pix_alpha = weights.sum(dim=1) # (B), pixel alpha\n rgb_final = rgb_final + 1 - pix_alpha.unsqueeze(-1) # (B, 3)\n\n if predict_segmentation:\n segs_final = torch.sum(weights.unsqueeze(-1) * segs, dim=-2) # (B, n_classes)\n return (\n weights,\n rgb_final,\n depth_final,\n alphas,\n invalid,\n z_samp,\n rgbs,\n # segs,\n segs_final\n )\n else:\n return (\n weights,\n rgb_final,\n depth_final,\n alphas,\n invalid,\n z_samp,\n rgbs\n )\n\n def forward(\n self, model, rays, want_weights=False, want_alphas=False, want_z_samps=False, want_rgb_samps=False, predict_segmentation=False, sample_from_dist=None):\n \"\"\"\n :model nerf model, should return (SB, B, (r, g, b, sigma))\n when called with (SB, B, (x, y, z)), for multi-object:\n SB = 'super-batch' = size of object batch,\n B = size of per-object ray batch.\n Should also support 'coarse' boolean argument for coarse NeRF.\n :param rays ray spec [origins (3), directions (3), near (1), far (1)] (SB, B, 8)\n :param want_weights if true, returns compositing weights (SB, B, K)\n :param predict_segmentation if true, return the segmentation class distribution for each pixel\n :return render dict\n 
\"\"\"\n with profiler.record_function(\"renderer_forward\"):\n if self.sched is not None and self.last_sched.item() > 0:\n self.n_coarse = self.sched[1][self.last_sched.item() - 1]\n self.n_fine = self.sched[2][self.last_sched.item() - 1]\n\n assert len(rays.shape) == 3\n superbatch_size = rays.shape[0]\n rays = rays.reshape(-1, 8) # (SB * B, 8)\n\n if sample_from_dist is None:\n z_coarse = self.sample_coarse(rays) # (B, Kc)\n else:\n prop_weights, prop_z_samp = sample_from_dist\n n_samples = prop_weights.shape[-1]\n prop_weights = prop_weights.reshape(-1, n_samples)\n prop_z_samp = prop_z_samp.reshape(-1, n_samples)\n z_coarse = self.sample_coarse_from_dist(rays, prop_weights, prop_z_samp)\n z_coarse, _ = torch.sort(z_coarse, dim=-1)\n\n coarse_composite = self.composite(\n model, rays, z_coarse, coarse=True, sb=superbatch_size, predict_segmentation=predict_segmentation\n )\n\n outputs = DotMap(\n coarse=self._format_outputs(\n coarse_composite, superbatch_size, want_weights=want_weights, want_alphas=want_alphas,\n want_z_samps=want_z_samps, want_rgb_samps=want_rgb_samps, want_segmentation=predict_segmentation\n ),\n )\n\n if self.using_fine:\n all_samps = [z_coarse]\n if self.n_fine - self.n_fine_depth > 0:\n all_samps.append(\n self.sample_fine(rays, coarse_composite[0].detach())\n ) # (B, Kf - Kfd)\n if self.n_fine_depth > 0:\n all_samps.append(\n self.sample_fine_depth(rays, coarse_composite[2])\n ) # (B, Kfd)\n z_combine = torch.cat(all_samps, dim=-1) # (B, Kc + Kf)\n z_combine_sorted, argsort = torch.sort(z_combine, dim=-1)\n fine_composite = self.composite(\n model, rays, z_combine_sorted, coarse=False, sb=superbatch_size,\n )\n outputs.fine = self._format_outputs(\n fine_composite, superbatch_size, want_weights=want_weights, want_alphas=want_alphas, want_z_samps=want_z_samps, want_rgb_samps=want_rgb_samps\n )\n\n return outputs\n\n def _format_outputs(\n self, rendered_outputs, superbatch_size, want_weights=False, want_alphas=False, want_z_samps=False, want_rgb_samps=False, want_segmentation=False\n ):\n if want_segmentation:\n weights, rgb_final, depth, alphas, invalid, z_samps, rgb_samps, segs_final = rendered_outputs\n else:\n weights, rgb_final, depth, alphas, invalid, z_samps, rgb_samps = rendered_outputs\n\n n_smps = weights.shape[-1]\n out_d_rgb = rgb_final.shape[-1]\n out_d_i = invalid.shape[-1]\n\n if superbatch_size > 0:\n rgb_final = rgb_final.reshape(superbatch_size, -1, out_d_rgb)\n depth = depth.reshape(superbatch_size, -1)\n weights = weights.reshape(superbatch_size, -1, n_smps)\n alphas = alphas.reshape(superbatch_size, -1, n_smps)\n invalid = invalid.reshape(superbatch_size, -1, n_smps, out_d_i)\n z_samps = z_samps.reshape(superbatch_size, -1, n_smps)\n rgb_samps = rgb_samps.reshape(superbatch_size, -1, n_smps, out_d_rgb)\n\n if want_segmentation:\n out_segs = segs_final.shape[-1]\n segs_final = segs_final.reshape(superbatch_size, -1, out_segs)\n\n ret_dict = DotMap(rgb=rgb_final, depth=depth, invalid=invalid)\n if want_weights:\n ret_dict.weights = weights\n if want_alphas:\n ret_dict.alphas = alphas\n if want_z_samps:\n ret_dict.z_samps = z_samps\n if want_rgb_samps:\n ret_dict.rgb_samps = rgb_samps\n if want_segmentation:\n ret_dict.segs = segs_final\n # ret_dict.segs_raw = segs_raw\n return ret_dict\n\n def sched_step(self, steps=1):\n \"\"\"\n Called each training iteration to update sample numbers\n according to schedule\n \"\"\"\n if self.sched is None:\n return\n self.iter_idx += steps\n while (\n self.last_sched.item() < len(self.sched[0])\n and 
self.iter_idx.item() >= self.sched[0][self.last_sched.item()]\n ):\n self.n_coarse = self.sched[1][self.last_sched.item()]\n self.n_fine = self.sched[2][self.last_sched.item()]\n print(\n \"INFO: NeRF sampling resolution changed on schedule ==> c\",\n self.n_coarse,\n \"f\",\n self.n_fine,\n )\n self.last_sched += 1\n\n @classmethod\n def from_conf(cls, conf, white_bkgd=False, eval_batch_size=100000):\n return cls(\n conf.get(\"n_coarse\", 128),\n conf.get(\"n_fine\", 0),\n n_fine_depth=conf.get(\"n_fine_depth\", 0),\n noise_std=conf.get(\"noise_std\", 0.0),\n depth_std=conf.get(\"depth_std\", 0.01),\n white_bkgd=conf.get(\"white_bkgd\", white_bkgd),\n lindisp=conf.get(\"lindisp\", True),\n eval_batch_size=conf.get(\"eval_batch_size\", eval_batch_size),\n sched=conf.get(\"sched\", None),\n hard_alpha_cap=conf.get(\"hard_alpha_cap\", False)\n )\n\n def bind_parallel(self, net, gpus=None, simple_output=False):\n \"\"\"\n Returns a wrapper module compatible with DataParallel.\n Specifically, it renders rays with this renderer\n but always using the given network instance.\n Specify a list of GPU ids in 'gpus' to apply DataParallel automatically.\n :param net A PixelNeRF network\n :param gpus list of GPU ids to parallize to. If length is 1,\n does not parallelize\n :param simple_output only returns rendered (rgb, depth) instead of the \n full render output map. Saves data tranfer cost.\n :return torch module\n \"\"\"\n wrapped = _RenderWrapper(net, self, simple_output=simple_output)\n if gpus is not None and len(gpus) > 1:\n print(\"Using multi-GPU\", gpus)\n wrapped = torch.nn.DataParallel(wrapped, gpus, dim=1)\n return wrapped" }, { "identifier": "map_fn", "path": "utils/array_operations.py", "snippet": "def map_fn(batch, fn):\ndef to(data, device, non_blocking=True):\ndef set_requires_grad(nets, requires_grad=False):\ndef mask_mean(t: torch.Tensor, m: torch.Tensor, dim=None, keepdim=False):\ndef apply_crop(array, crop):\ndef shrink_mask(mask, shrink=3):\ndef get_mask(size, border=5, device=None):\ndef get_grid(H, W, normalize=True):\ndef detach(t):" }, { "identifier": "color_tensor", "path": "utils/plotting.py", "snippet": "def color_tensor(tensor: torch.Tensor, cmap, norm=False):\n if norm:\n tensor = (tensor - tensor.min()) / (tensor.max() - tensor.min())\n map = plt.cm.get_cmap(cmap)\n tensor = torch.tensor(map(tensor.cpu().numpy()), device=tensor.device)[..., :3]\n return tensor" } ]
import numpy as np
import sys
import copy
import hydra
import torch
from moviepy.video.io.ImageSequenceClip import ImageSequenceClip
from tqdm import tqdm
from scripts.inference_setup import *
from models.bts.model import BTSNet, ImageRaySampler
from models.common.render import NeRFRenderer
from utils.array_operations import map_fn, unsqueezer
from utils.plotting import color_tensor
11,884
sys.path.append(".") def main(): s_img = True s_depth = True dry_run = False indices = [1044] d_min = 3 d_max = 40 task = "KITTI-360" assert task in ["KITTI-360", "KITTI-Raw", "RealEstate10K"] cam_traj = "simple_movement.npy" if task == "KITTI-360": dataset, config_path, cp_path, out_path, resolution, cam_incl_adjust = setup_kitti360("videos/nvs", "2013_05_28_drive_0000_sync", "val_seq") elif task == "KITTI-Raw": dataset, config_path, cp_path, out_path, resolution, cam_incl_adjust = setup_kittiraw("videos/nvs", "val") elif task == "RealEstate10K": dataset, config_path, cp_path, out_path, resolution, cam_incl_adjust = setup_re10k("videos/nvs", "val") else: raise ValueError(f"Invalid task: {task}") # Slightly hacky, but we need to load the config based on the task global config config = {} @hydra.main(version_base=None, config_path="../../configs", config_name=config_path) def main_dummy(cfg): global config config = copy.deepcopy(cfg) main_dummy() print("Setup folders") out_path.mkdir(exist_ok=True, parents=True) traj_folder = Path("scripts/videos/trajectories") print('Loading checkpoint') cp = torch.load(cp_path, map_location=device) net = BTSNet(config["model_conf"])
sys.path.append(".") def main(): s_img = True s_depth = True dry_run = False indices = [1044] d_min = 3 d_max = 40 task = "KITTI-360" assert task in ["KITTI-360", "KITTI-Raw", "RealEstate10K"] cam_traj = "simple_movement.npy" if task == "KITTI-360": dataset, config_path, cp_path, out_path, resolution, cam_incl_adjust = setup_kitti360("videos/nvs", "2013_05_28_drive_0000_sync", "val_seq") elif task == "KITTI-Raw": dataset, config_path, cp_path, out_path, resolution, cam_incl_adjust = setup_kittiraw("videos/nvs", "val") elif task == "RealEstate10K": dataset, config_path, cp_path, out_path, resolution, cam_incl_adjust = setup_re10k("videos/nvs", "val") else: raise ValueError(f"Invalid task: {task}") # Slightly hacky, but we need to load the config based on the task global config config = {} @hydra.main(version_base=None, config_path="../../configs", config_name=config_path) def main_dummy(cfg): global config config = copy.deepcopy(cfg) main_dummy() print("Setup folders") out_path.mkdir(exist_ok=True, parents=True) traj_folder = Path("scripts/videos/trajectories") print('Loading checkpoint') cp = torch.load(cp_path, map_location=device) net = BTSNet(config["model_conf"])
renderer = NeRFRenderer.from_conf(config["renderer"])
2
2023-11-12 21:53:27+00:00
16k
TCLResearchEurope/torch-dag
torch_dag_algorithms/pruning/module_multipliers.py
[ { "identifier": "structured_modules", "path": "torch_dag/structured_modules.py", "snippet": "ACTIVATION_MODULES_T = Union[\n nn.ReLU,\n nn.ReLU6,\n nn.SiLU,\n nn.Softmax,\n nn.Sigmoid,\n nn.Hardswish,\n nn.Hardsigmoid,\n nn.GELU,\n nn.LeakyReLU,\n nn.ELU,\n nn.Tanh,\n nn.Identity,\n]\nACTIVATION_MODULES = get_args(ACTIVATION_MODULES_T) # -ish...\n B, C, H, W = x.shape\n B, C, H, W = x.shape\n B, N, C = x.shape\n H = W = math.isqrt(N)\n B, N, C = x.shape\n SPECS = (\n '(B,C,H,W)->(B,H*W,C)',\n '(B,N,C)->(B,N,target)',\n )\n PREDECESSOR_KEYWORD = 'predecessor'\n B, C, H, W = x.shape\n B, N, C = x.shape\n SPECS = (\n '(B,C,H,W)->(B,H*W,C)',\n '(B,N,C)->(B,N,target)',\n )\n PREDECESSOR_KEYWORD = 'predecessor'\n B, C, H, W = x.shape\n B, N, C = x.shape\n B, C, H, W = x.shape\n B, C, H, W = x.shape\n B, N, C = x.shape\n H = W = math.isqrt(N)\n B, T, C = x.size() # batch size, sequence length, embedding dimensionality (n_embd)\n N = int(h * scale_factor)\n B, T, C, _ = q.size()\n PREDECESSOR_KEYWORD = 'predecessor'\n B, N, C = x.shape\n B, N, C = x.shape\ndef space_to_depth(x: torch.Tensor, block_size: int):\ndef depth_to_space(x: torch.Tensor, block_size: int):\n def build_activation_module(cls, activation_name):\n def forward(self, inputs: List[torch.Tensor]):\n def forward(self, inputs: List[torch.Tensor]):\n def forward(self, inputs: List[torch.Tensor]):\n def forward(self, inputs: List[torch.Tensor]):\n def forward(self, inputs: List[torch.Tensor]):\n def __init__(\n self,\n dim: int,\n ):\n def forward(self, inputs: List[torch.Tensor]):\n def __init__(self, perm: Tuple[int, ...]):\n def forward(self, inputs: torch.Tensor):\n def __init__(self, dim0: int, dim1: int):\n def forward(self, inputs: torch.Tensor):\n def __init__(\n self,\n dim,\n keepdim: bool,\n ):\n def forward(self, inputs: torch.Tensor):\n def __init__(\n self,\n block_size: int,\n ):\n def forward(self, inputs: torch.Tensor):\n def __init__(\n self,\n target_shape: Optional[Tuple[int, ...]] = None,\n ):\n def forward(self, inputs: torch.Tensor, target_shape=None):\n def __init__(\n self,\n target_shape: Optional[Tuple[int, ...]] = None,\n ):\n def forward(self, inputs: torch.Tensor, target_shape=None):\n def forward(self, inputs: torch.Tensor):\n def forward(self, inputs: List[torch.Tensor]):\n def forward(self, inputs: List[torch.Tensor]):\n def __init__(\n self,\n index: Union[int, Tuple[int, ...]],\n ):\n def forward(self, inputs: Union[torch.Tensor, List[torch.Tensor]]):\n def calc_same_pad(self, i: int, k: int, s: int, d: int) -> int:\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n def __init__(self, slice_spec):\n def replace_ellipses_by_slices(slice_spec):\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n def __init__(self, transpose: bool, normalize: bool = True):\n def forward(self, x: List[torch.Tensor]) -> torch.Tensor:\n def forward(self, x: List[torch.Tensor]) -> torch.Tensor:\n def __init__(self, num_channels: int, use_bias: bool, weight_init_value: float = 1e-5):\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n def __init__(self, patch_size: int):\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n def __init__(self, patch_size: int):\n def forward(self, x: Union[torch.Tensor, List[torch.Tensor]]) -> torch.Tensor:\n def __init__(self, bn: torch.nn.BatchNorm1d):\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n def __init__(self, scalar):\n def forward(self, x: 
torch.Tensor) -> torch.Tensor:\n def __init__(self, param: torch.nn.Parameter):\n def forward(self, x) -> torch.Tensor:\n def __init__(self, p: str = 'fro', dim=None, keepdim=False):\n def forward(self, x) -> torch.Tensor:\n def __init__(self, dim, keepdim=False):\n def forward(self, x) -> torch.Tensor:\n def __init__(self, dim, keepdim=False):\n def forward(self, x) -> torch.Tensor:\n def __init__(self, split_size_or_sections, dim=0):\n def forward(self, x) -> List[torch.Tensor]:\n def __init__(\n self,\n spec: Union[str, Dict],\n target_shape: Optional[Tuple[int, ...]] = None,\n ):\n def clear_custom_buffers(self):\n def forward(self, x) -> torch.Tensor:\n def __init__(\n self,\n spec: Union[str, Dict],\n target_shape: Optional[Tuple[int, ...]] = None,\n ):\n def forward(self, x) -> torch.Tensor:\n def __init__(self, patch_size: int):\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n def __init__(self, patch_size: int):\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n def __init__(self, dim: int):\n def forward(self, x: torch.Tensor) -> List[torch.Tensor]:\n def __init__(self, chunks, dim: int):\n def forward(self, x: torch.Tensor) -> List[torch.Tensor]:\n def __init__(self, dim: int):\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n def __init__(self):\n def forward(self, x: List[torch.Tensor]) -> torch.Tensor:\n def __init__(self, start_dim: int = 0, end_dim: int = - 1):\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n def __init__(self, block_size: int):\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n def __init__(self, block_size: int):\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n def __init__(\n self,\n size=None,\n scale_factor=None,\n mode='nearest',\n align_corners=None,\n recompute_scale_factor=None,\n antialias=False,\n ):\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n def __init__(self, p=2.0, dim=1, ):\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n def __init__(self, pad: tuple, mode: str = 'constant', value: int = None):\n def forward(self, input):\n def __init__(self, ndim, bias):\n def forward(self, input):\n def forward(self, x):\n def forward(self, idx):\n def __init__(self, config):\n def forward(self, x):\n def __init__(self, scale_factor=2.0):\n def forward(self, x):\n def __init__(self,\n dim: int,\n num_heads: int,\n use_bias: bool = True,\n dropout_rate: float = 0.0,\n output_dropout_rate: float = 0.0,\n include_reshapings: bool = False,\n ):\n def forward(self, x: List[torch.Tensor]):\n def __init__(self, in_features: int, out_features):\n def forward(self, x):\n def sample_from_logits(logits: torch.Tensor) -> torch.Tensor:\n def proportion(self):\n def entropy_loss(self, epsilon: float = 0.01):\n def trainable_params(self):\n def fuse(self):\n def __init__(self, in_features: int, out_features, hidden_dim: int):\n def forward(self, x):\n def trainable_params(self):\n def __init__(self, in_features: int, out_features):\n def forward(self, x):\n def __init__(self, scale_factor: int, align_corners: bool = False):\n def forward(self, x):\n def __init__(\n self,\n num_heads: int,\n ):\n def forward(self, input: torch.Tensor):\n def __init__(\n self,\n num_heads: int,\n ):\n def forward(self, input: torch.Tensor):\n def __init__(self, in_features: int, out_features: int):\n def trainable_params(self):\n def non_logits_params(self):\n def sample_from_logits(logits: torch.Tensor) -> torch.Tensor:\n def proportion(self):\n def forward(self, x):\n def entropy_loss(self, epsilon: float = 0.01):\n def 
fuse(self):\n def __init__(self, in_features: int, out_features: int, bias: bool = True):\n def forward(self, x):\n def __init__(self, in_features: int, out_features: int):\n def trainable_params(self):\n def sample_from_logits(logits: torch.Tensor) -> torch.Tensor:\n def proportion(self):\n def forward(self, x):\n def entropy_loss(self, epsilon: float = 0.01):\n def __init__(\n self,\n dim,\n num_ss_tokens: int,\n s_ratio: int = 4,\n use_bias: bool = True,\n activation=nn.ReLU(),\n ):\n def forward(self, x):\n def __init__(self, in_features: int, out_features: int, ks: int, padding, stride, bias: bool):\n def non_logits_params(self):\n def trainable_params(self):\n def sample_from_logits(logits: torch.Tensor) -> torch.Tensor:\n def proportion(self) -> torch.Tensor:\n def forward(self, x):\n def entropy_loss(self, epsilon: float = 0.01) -> torch.Tensor:\n def fuse(self) -> Tuple[nn.Conv2d, nn.Conv2d]:\n def build_from_conv(cls, module: nn.Conv2d) -> \"DecomposedConv\":\n def __init__(self, in_features: int, out_features: int, bias: bool):\n def trainable_params(self):\n def sample_from_logits(logits: torch.Tensor) -> torch.Tensor:\n def proportion(self) -> torch.Tensor:\n def forward(self, x):\n def entropy_loss(self, epsilon: float = 0.01) -> torch.Tensor:\n def fuse(self) -> Tuple[nn.Linear, nn.Linear]:\n def build_from_linear(cls, module: nn.Linear) -> \"DecomposedLinear\":\n def __init__(self, pow: Union[float, int]):\n def forward(self, inputs: torch.Tensor):\n def __init__(self, dim: int):\n def forward(self, inputs: torch.Tensor):\n def __init__(self, sizes: Union[torch.Size, int]):\n def forward(self, inputs: List[torch.Tensor]):\n def __init__(self):\n def forward(self, inputs: List[torch.Tensor]):\n def __init__(self, dropout_p=0.0, is_causal: bool = False):\n def forward(self, inputs: List[torch.Tensor]):\n def __init__(self, function: Callable, spec: Dict):\n def _build_inputs(self, spec: Dict, inputs=None, inputs_counter: int = 0):\n def forward(self, inputs):\n def __init__(self, dim):\n def forward(self, inputs: List[torch.Tensor]):\n def __init__(self, arg):\n def forward(self, x):\n def __init__(self, shifts: Union[int, Tuple[int, ...]], dims: Union[int, Tuple[int, ...]] = None):\n def forward(self, inputs: torch.Tensor):\n def __init__(\n self,\n index: Union[int, Tuple[int, ...]],\n ):\n def forward(self, inputs: Union[torch.Tensor, List[torch.Tensor]]):\n def __init__(\n self,\n dim,\n num_heads: int = 8,\n use_bias: bool = True,\n attn_drop: float = 0.0,\n proj_drop: float = 0.0,\n use_out_bias: bool = True,\n width_multiplier: float = 0.25,\n ):\n def forward(self, x):\n def __init__(\n self,\n dim,\n num_heads=8,\n qkv_bias=False,\n qk_norm=False,\n attn_drop=0.,\n proj_drop=0.,\n norm_layer=nn.LayerNorm,\n ):\n def forward(self, x):\n def __init__(\n self,\n dim,\n num_heads=8,\n qkv_bias=False,\n qk_norm=False,\n attn_drop=0.,\n proj_drop=0.,\n norm_layer=nn.LayerNorm,\n ):\n def forward(self, x):\n def convert_from_timm(cls, module: Attention):\n def __init__(\n self,\n dim,\n num_heads: int = 8,\n use_bias: bool = True,\n attn_drop: float = 0.0,\n proj_drop: float = 0.0,\n use_out_bias: bool = True,\n width_multiplier: float = 0.25,\n ):\n def forward(self, x):\n def __init__(\n self,\n dim,\n num_heads: int = 8,\n use_bias: bool = True,\n attn_drop: float = 0.0,\n proj_drop: float = 0.0,\n use_out_bias: bool = True,\n ):\n def forward(self, x):\n def __init__(\n self,\n in_features: int,\n out_features: int,\n use_bias: bool = False,\n ):\n def rank(self) -> 
int:\n def s(self):\n def build_from_linear(cls, module: nn.Linear):\n def forward(self, x):\nclass ActivationModuleBuilder:\nclass EmptyModule(torch.nn.Module):\nclass AddModule(torch.nn.Module):\nclass SubModule(torch.nn.Module):\nclass MulModule(torch.nn.Module):\nclass DivModule(torch.nn.Module):\nclass ConcatModule(torch.nn.Module):\nclass PermuteModule(torch.nn.Module):\nclass TransposeModule(torch.nn.Module):\nclass GlobalMeanPool2DModule(torch.nn.Module):\nclass SpaceToDepthModule(torch.nn.Module):\nclass ReshapeModule(torch.nn.Module):\nclass ReshapeModuleV2(torch.nn.Module):\nclass PatchifyModule(torch.nn.Module):\nclass DePatchifyModule(torch.nn.Module):\nclass TensorMergerModule(torch.nn.Module):\nclass TensorExtractorModule(torch.nn.Module):\nclass Conv2DSameModule(torch.nn.Conv2d):\nclass SliceModule(torch.nn.Module):\nclass GetShapeModule(torch.nn.Module):\nclass GetShapeModuleV2(torch.nn.Module):\nclass TfMatmulModule(torch.nn.Module):\nclass MatmulModule(torch.nn.Module):\nclass ChannelAffineModule(torch.nn.Module):\nclass TfTokenizeModule(torch.nn.Module):\nclass TfDetokenizeModule(torch.nn.Module):\nclass TfBatchNorm1d(torch.nn.Module):\nclass ScalarMul(torch.nn.Module):\nclass ParameterModule(torch.nn.Module):\nclass NormModule(torch.nn.Module):\nclass MeanModule(torch.nn.Module):\nclass SumModule(torch.nn.Module):\nclass SplitModule(torch.nn.Module):\nclass ReshapeWithSpecModule(torch.nn.Module):\nclass ReshapeWithSpecModuleV2(torch.nn.Module):\nclass TokenizeModule(torch.nn.Module):\nclass DetokenizeModule(torch.nn.Module):\nclass UnbindModule(torch.nn.Module):\nclass ChunkModule(torch.nn.Module):\nclass AuxiliaryTokenModule(torch.nn.Module):\nclass ExpandAsModule(torch.nn.Module):\nclass FlattenModule(torch.nn.Module):\nclass DepthToSpaceModule(torch.nn.Module):\nclass SpaceToDepthModule(torch.nn.Module):\nclass InterpolateModule(torch.nn.Module):\nclass NormalizeModule(torch.nn.Module):\nclass PadModule(torch.nn.Module):\nclass LayerNormWithOptionalBias(nn.Module):\nclass GeluGPT(nn.Module):\nclass PositionalEmbeddingGPT(nn.Module):\nclass CausalSelfAttention(nn.Module):\nclass BilinearUpsampling(nn.Module):\nclass EfficientAttention(nn.Module):\nclass AdjustableQueryKeyMatmul(nn.Module):\nclass PreFusedAdjustableQueryKeyMatmul(nn.Module):\nclass FusedAdjustableQueryKeyMatmul(nn.Module):\nclass HalfPixelCentersFalseBilinearUpsample(nn.Module):\nclass MakeHeadsModule(torch.nn.Module):\nclass UnmakeHeadsModule(torch.nn.Module):\nclass SparseAdjustableLinear(nn.Module):\nclass SparseLinear(nn.Linear):\nclass DecomposedSparseLinear(nn.Module):\nclass StateSpaceAttentionV2(torch.nn.Module):\nclass DecomposedConv(nn.Module):\nclass DecomposedLinear(nn.Module):\nclass PowerModule(torch.nn.Module):\nclass UnsqueezeModule(torch.nn.Module):\nclass ExpandTokenModule(torch.nn.Module):\nclass AddcmulModule(torch.nn.Module):\nclass ScaledDotProductAttentionModule(torch.nn.Module):\nclass AutoWrapFunctionModule(torch.nn.Module):\nclass StackModule(torch.nn.Module):\nclass ArgModule(torch.nn.Module):\nclass RollModule(torch.nn.Module):\nclass GetItemModule(torch.nn.Module):\nclass StageZeroSllrcAttention(torch.nn.Module):\nclass Attention(nn.Module):\nclass BatchedAttention(torch.nn.Module):\nclass SllrcAttention(torch.nn.Module):\nclass MultiQueryAttention(torch.nn.Module):\nclass SvdLinear(nn.Module):" }, { "identifier": "DagModule", "path": "torch_dag/core/dag_module.py", "snippet": "class DagModule(torch.nn.Module):\n MAX_LEN_REPR = None\n\n def __init__(\n self,\n name: str,\n 
vertices: Optional[List[Vertex]] = None,\n output_vertex: Optional[InnerVertex] = None,\n ):\n super().__init__()\n self.name = name\n self.vertices = vertices if vertices is not None else []\n self.output_vertex = output_vertex\n self.forward_dict = None\n self.inputs_dict = None\n self.cache_forward_dict = False\n self._inner_modules = None\n self.forward_scaffold = {}\n self.output_index = None\n self.compiled = False\n self.update_inner_modules()\n\n def compile(self, inputs: Optional[Union[torch.Tensor, List[torch.Tensor]]] = None):\n \"\"\"\n In general `forward` method of DagModule is not `torch.compile` friendly. To overcome that\n we need to use a modified implementation of the forward pass, with no cacheing of intermediate tensors.\n Additionally, some modules may require a compile-type step for `torch.compile` usage.\n :param inputs: optional input (a dummy tensor for a single forward pass)\n \"\"\"\n if inputs is not None:\n is_training = self.training\n if is_training:\n self.eval()\n _ = self(inputs)\n if is_training:\n self.train()\n\n self.forward_scaffold, self.output_index = self.get_forward_scaffold()\n for v in self.inner_vertices:\n if isinstance(v.module, DagModule):\n v.module.compile()\n self.compiled = True\n\n @property\n def inner_modules(self) -> torch.nn.ModuleList:\n self._inner_modules = torch.nn.ModuleList([vertex.module for vertex in self.inner_vertices])\n return self._inner_modules\n\n @property\n def input_vertices(self) -> List[InputVertex]:\n return [vertex for vertex in self.vertices if isinstance(vertex, InputVertex)]\n\n @property\n def inner_vertices(self) -> List[InnerVertex]:\n return [vertex for vertex in self.vertices if isinstance(vertex, InnerVertex)]\n\n def update_inner_modules(self):\n self._inner_modules = torch.nn.ModuleList([vertex.module for vertex in self.inner_vertices])\n for iv in self.inner_vertices:\n if isinstance(iv.module, DagModule):\n iv.module.update_inner_modules()\n\n def get_vertex_by_name(self, name: str) -> Union[InnerVertex, InputVertex]:\n result = [vertex for vertex in self.vertices if vertex.name == name]\n if len(result) == 1:\n return result[0]\n elif len(result) > 1:\n raise AssertionError(f'Multiple vertices found with name: {name} -> {result}')\n else:\n return\n\n def get_forward_scaffold(self):\n # a mapping between vertices index and its predecessors indices\n forward_scaffold = {}\n for k, vertex in enumerate(self.vertices):\n if isinstance(vertex, InputVertex):\n pass\n elif isinstance(vertex, InnerVertex):\n predecessors = vertex.predecessors\n predecessors_indices = [\n self.vertices.index(pd) for pd in predecessors\n ]\n forward_scaffold[k] = predecessors_indices\n\n output_index = self.vertices.index(self.output_vertex)\n\n return forward_scaffold, output_index\n\n def compiled_forward(self, inputs: Union[torch.Tensor, List[torch.Tensor]]) -> Dict[\n InnerVertex, Union[torch.Tensor, List[torch.Tensor]]]:\n\n assert self.compiled\n\n if not isinstance(inputs, List):\n inputs = [inputs]\n if len(inputs) != len(self.input_vertices):\n raise AssertionError\n\n forward_list = [None for _ in range(len(self.vertices))]\n\n for k, input in enumerate(inputs):\n forward_list[k] = input\n\n num_inputs = len(inputs)\n\n for k in range(len(self.vertices)):\n if k < num_inputs:\n pass\n else:\n\n pd_indices = self.forward_scaffold[k]\n module_inputs = [forward_list[pd_index] for pd_index in pd_indices]\n if len(module_inputs) == 1:\n module_inputs = module_inputs[0]\n try:\n result = 
self.vertices[k].module(module_inputs)\n except (TypeError, AttributeError):\n result = self.vertices[k].module(*module_inputs)\n result = _postprocess_module_output(result)\n\n forward_list[k] = result\n\n return forward_list[self.output_index]\n\n def forward(self, inputs: Union[torch.Tensor, List[torch.Tensor]]) -> Dict[\n InnerVertex, Union[torch.Tensor, List[torch.Tensor]]]:\n # this is for `torch.compile` usage\n if self.compiled:\n return self.compiled_forward(inputs)\n\n if not isinstance(inputs, List):\n inputs = [inputs]\n if len(inputs) != len(self.input_vertices):\n raise AssertionError\n\n forward_dict = {}\n for k, v in enumerate(self.input_vertices):\n forward_dict[v] = inputs[k]\n\n # forward_dict = {vertex: tensor for vertex, tensor in zip(self.input_vertices, inputs)}\n inputs_dict = {}\n\n for vertex in self.vertices:\n if isinstance(vertex, InputVertex):\n pass\n elif isinstance(vertex, InnerVertex):\n predecessors = vertex.predecessors\n module_inputs = [forward_dict[pd] for pd in predecessors]\n inputs_dict[vertex] = module_inputs\n\n if len(module_inputs) == 1:\n module_inputs = module_inputs[0]\n\n try:\n result = vertex.module(module_inputs)\n except (TypeError, AttributeError):\n result = vertex.module(*module_inputs)\n # if isinstance(result, Tuple):\n result = _postprocess_module_output(result)\n\n forward_dict[vertex] = result\n if self.cache_forward_dict:\n self.forward_dict = forward_dict\n self.inputs_dict = inputs_dict\n return forward_dict[self.output_vertex]\n\n def traverse(\n self,\n processor: VertexProcessor = None,\n ):\n if processor is None:\n inner_vertices = []\n for inner_vertex in self.inner_vertices:\n if isinstance(inner_vertex.module, DagModule):\n inner_vertices.extend(inner_vertex.module.traverse())\n inner_vertices.append(inner_vertex)\n return inner_vertices\n else:\n for inner_vertex in self.traverse():\n processor(inner_vertex)\n # TODO: Remove after validation\n # self._update_inner_modules()\n\n def _check_if_name_unique(self, name: str):\n if name in [v.name for v in self.vertices]:\n raise ValueError(\n f'{self.name} already has an Vertex with name {name}. 
Please use different name.'\n )\n\n def add_input_vertex(self, name: str) -> InputVertex:\n self._check_if_name_unique(name)\n input_vertex = InputVertex(name)\n self.vertices.append(input_vertex)\n return input_vertex\n\n def add_vertex(\n self,\n name: str,\n module: torch.nn.Module,\n predecessors: List[Vertex],\n ) -> InnerVertex:\n self._check_if_name_unique(name)\n assert isinstance(module, torch.nn.Module)\n\n inner_vertex = InnerVertex(\n name=name,\n module=module,\n predecessors=predecessors,\n )\n for predecessor in predecessors:\n if predecessor not in self.vertices:\n raise ValueError(f'The predecessor: {predecessor} of InnerVertex: {InnerVertex} is not in '\n f'the DagModule: {self.name}')\n self.vertices.append(inner_vertex)\n self.inner_modules.append(module)\n inner_vertex.dag_module = self\n return inner_vertex\n\n def __repr__(self):\n representation = f'{self.__class__.__name__}[{self.name}]'\n if len(self.vertices) == 0:\n return representation\n for inner_vertex in self.inner_vertices:\n inner_vertex.MAX_LEN_REPR = self.MAX_LEN_REPR\n\n for vertex in self.vertices:\n if isinstance(vertex, InputVertex):\n representation += f'\\n << {vertex.name} '\n else:\n index = self.inner_vertices.index(vertex)\n prefix = '>>' if vertex == self.output_vertex else '*'\n if isinstance(vertex.module, DagModule):\n prefix += '#'\n representation += f\"\\n{prefix} {index}: {vertex} \" \\\n f\"--> predecessors: {vertex.predecessors}, \" \\\n f\"successors: {vertex.successors}\"\n representation += f' {self.add_custom_module_info(vertex)}'\n for vertex in self.inner_vertices:\n vertex.MAX_LEN_REPR = None\n return representation\n\n def add_custom_module_info(self, vertex: InnerVertex):\n m = vertex.module\n if isinstance(m, torch.nn.Conv2d):\n return f'Conv2d(in={m.in_channels}, out={m.out_channels}, ks={m.kernel_size}, padding={m.padding})'\n if isinstance(m, torch.nn.Linear):\n return f'Linear(in={m.in_features}, out={m.out_features})'\n return ''\n\n def mark_current_top_vertex_as_output(self):\n if not self.inner_vertices:\n raise ValueError(f'One cannot mark top node in an empty {self}')\n if self.output_vertex is not None:\n logger.warning(f'{self} already has an output vertex. 
Replacing...')\n self.output_vertex = self.inner_vertices[-1]\n\n @property\n def module_classes(self) -> Set:\n return set([m.__class__ for m in self.inner_modules])\n\n def unroll_inner_modules(self) -> List[torch.nn.Module]:\n result = []\n for m in self.inner_modules:\n if not isinstance(m, DagModule):\n result.append(m)\n else:\n result.extend(m.unroll_inner_modules())\n return result\n\n def save(self, path: str):\n # TODO: Remove after validation\n # self._update_inner_modules()\n self.enforce_names_uniqueness()\n os.makedirs(path, exist_ok=True)\n atomic_modules = self.unroll_inner_modules()\n self.clear_custom_buffers()\n torch.save(torch.nn.ModuleList(atomic_modules), os.path.join(path, 'modules.pt'))\n with open(os.path.join(path, 'config.dict.json'), 'w') as f:\n json.dump(self.config_dict(), f)\n\n def clear_custom_buffers(self):\n for module in self.unroll_inner_modules():\n if hasattr(module, 'clear_custom_buffers'):\n module._buffers.clear()\n\n @classmethod\n def load(\n cls,\n path: str,\n map_location='cpu',\n custom_module_classes: Tuple[Type[torch.nn.Module]] = (),\n ) -> \"DagModule\":\n \"\"\"\n\n :param path: directory from which model should be loaded\n :param map_location: defaults to `cpu`\n :param custom_module_classes: custom torch module classes needed for loading a `DagModule` that was built\n using these modules\n \"\"\"\n with open(os.path.join(path, 'config.dict.json'), 'r') as f:\n config_dict = json.load(f)\n m = torch.load(os.path.join(path, 'modules.pt'), map_location=map_location)\n return cls.load_from_config_dict_and_atomic_modules(\n config_dict=config_dict,\n atomic_modules=m\n )\n\n @classmethod\n def load_from_config_dict_and_atomic_modules(\n cls,\n config_dict: Dict,\n atomic_modules: List[torch.nn.Module]\n ) -> \"DagModule\":\n output_index = config_dict.pop('output_index')\n name = config_dict.pop('name')\n if 'class' in config_dict:\n class_name = config_dict.pop('class')\n else:\n class_name = cls.__name__\n dag = None\n if class_name == cls.__name__:\n dag = cls(name)\n for subclass in cls.__subclasses__():\n if subclass.__name__ == class_name:\n dag = subclass(name)\n\n if dag is None:\n raise NotImplementedError(f'There is no subclass with name: {class_name}.')\n\n for k, (key, config) in enumerate(config_dict.items()):\n if config['type'] == 'input':\n dag.add_input_vertex(name=config['name'])\n else:\n predecessors = [dag.vertices[index] for index in config['predecessor_indices']]\n if config['is_atomic']:\n module = atomic_modules[config['module_index']]\n else:\n module = cls.load_from_config_dict_and_atomic_modules(\n config_dict=config['module_dict'],\n atomic_modules=atomic_modules,\n )\n vertex = dag.add_vertex(\n name=config['name'],\n module=module,\n predecessors=predecessors,\n )\n orbit = config.get('orbit')\n if orbit:\n vertex.orbit = orbit\n if k == output_index:\n dag.output_vertex = vertex\n\n return dag\n\n def config_dict(self, atomic_modules: List[torch.nn.Module] = None) -> Dict:\n if atomic_modules is None:\n atomic_modules = self.unroll_inner_modules()\n config_dict = {}\n for k, vertex in enumerate(self.vertices):\n _config = vertex.config_dict(atomic_modules)\n config_dict[k] = _config\n\n config_dict['name'] = self.name\n config_dict['class'] = self.__class__.__name__\n config_dict['output_index'] = self.vertices.index(self.output_vertex)\n return config_dict\n\n def _get_inner_vertex_predecessor_indices(self, inner_vertex: InnerVertex) -> List[int]:\n result = [\n self.vertices.index(predecessor)\n for 
predecessor in inner_vertex.predecessors\n ]\n return result\n\n @property\n def flat(self) -> bool:\n for v in self.inner_vertices:\n if isinstance(v.module, DagModule):\n return False\n return True\n\n def flatten(self, input_shape_for_verification: Optional[Tuple[int, ...]] = None) -> \"DagModule\":\n \"\"\"\n This method will switch the `dag` to `eval` mode if `input_shape_for_verification` is provided.\n :param input_shape_for_verification:\n :return:\n \"\"\"\n dag_copy = deepcopy(self)\n if self.flat:\n return dag_copy\n\n if input_shape_for_verification:\n dag_copy.eval()\n x = torch.normal(mean=torch.zeros(size=input_shape_for_verification))\n reference_output = dag_copy(x)\n\n # builds a new cell (not in place flatten)\n dag = self.__class__(name=dag_copy.name, vertices=dag_copy.input_vertices)\n for v in dag_copy.inner_vertices:\n if not isinstance(v.module, DagModule):\n dag.vertices.append(v)\n v.dag_module = dag\n if v == dag_copy.output_vertex:\n dag.output_vertex = v\n else:\n inner_dag_predecessors = v.predecessors\n inner_dag_successors = v.successors\n inner_dag = v.module.flatten()\n for iv in inner_dag.inner_vertices:\n for pd in iv.predecessors: # remap predecessors where needed\n if isinstance(pd, InputVertex):\n pd_index_in_inner_dag = inner_dag.input_vertices.index(pd)\n index = iv.predecessors.index(pd)\n iv.predecessors[index] = inner_dag_predecessors[pd_index_in_inner_dag]\n if inner_dag.output_vertex == iv: # remap output of inner dag\n for suc in inner_dag_successors:\n index = suc.predecessors.index(v)\n suc.predecessors[index] = iv\n iv.dag_module = dag\n dag.vertices.append(iv)\n if v == dag_copy.output_vertex:\n dag.output_vertex = iv\n assert all([e in dag.vertices for e in iv.predecessors])\n\n if input_shape_for_verification:\n dag.eval()\n new_output = dag(x)\n assert torch.abs(reference_output - new_output).sum() == 0.0\n\n # TODO: Remove after validation\n # self._update_inner_modules()\n dag.enforce_names_uniqueness()\n\n return dag\n\n def enforce_names_uniqueness(self):\n names = [v.name for v in self.vertices]\n while len(names) != len(set(names)):\n names_counter = Counter()\n for v in self.vertices:\n name = v.name\n names_counter[name] += 1\n if names_counter[name] > 1:\n new_name = f'{name}_{names_counter[name] - 1}'\n logger.debug(f'Renaming: {name} -> {new_name}')\n v.name = new_name\n names = [v.name for v in self.vertices]\n\n def clear_tensor_dicts(self):\n self.forward_dict = None\n self.inputs_dict = None\n\n @property\n def device(self):\n # https://discuss.pytorch.org/t/how-to-check-if-model-is-on-cuda/180/10\n # useful, but may be dangerous\n self.update_inner_modules()\n device_ = next(iter(self.parameters())).device\n if not all([p.device == device_ for p in self.parameters()]):\n raise AssertionError(f'Not all parameters of {self.name} are on the same device')\n return device_" }, { "identifier": "InputVertex", "path": "torch_dag/core/dag_module.py", "snippet": "class InputVertex(Vertex):\n def config_dict(self, atomic_modules: List[torch.nn.Module]):\n return {\n 'name': self.name,\n 'type': 'input',\n }" }, { "identifier": "InnerVertex", "path": "torch_dag/core/dag_module.py", "snippet": "class InnerVertex(Vertex):\n def __init__(\n self,\n name: str,\n module: torch.nn.Module,\n predecessors: List[Vertex],\n ):\n super().__init__(name=name)\n self._module = module\n self._predecessors = list(predecessors)\n self.dag_module: \"DagModule\" = None\n self.orbit = None\n\n @property\n def successors(self) -> List['InnerVertex']:\n 
if self.dag_module is None:\n logger.error(f'Trying to get successors of an InnerVertex that has not been assigned to any DagModule.')\n return [vertex for vertex in self.dag_module.inner_vertices if self in vertex.predecessors]\n\n @property\n def predecessors(self) -> List[Vertex]:\n return self._predecessors\n\n @property\n def predecessor_indices(self) -> List[Vertex]:\n return [self.dag_module.vertices.index(pd) for pd in self.predecessors]\n\n @predecessors.setter\n def predecessors(self, new_predecessors: List[Vertex]):\n if not isinstance(new_predecessors, list):\n logger.error(f'Predecessors is expected to be a list. Got {type(new_predecessors)} except.')\n self._predecessors = new_predecessors\n\n @property\n def module(self) -> torch.nn.Module:\n return self._module\n\n @module.setter\n def module(self, module: torch.nn.Module):\n self._module = module\n # TODO: Remove after validation\n self.dag_module.update_inner_modules()\n\n def config_dict(self, atomic_modules: List[torch.nn.Module]):\n is_atomic = not isinstance(self.module, DagModule)\n result = {\n 'name': self.name,\n 'predecessor_indices': self.predecessor_indices,\n 'is_atomic': is_atomic,\n 'type': 'inner',\n 'orbit': self.orbit,\n }\n if not is_atomic:\n result['module_dict'] = self.module.config_dict(atomic_modules)\n else:\n result['module_index'] = atomic_modules.index(self.module)\n return result" }, { "identifier": "Vertex", "path": "torch_dag/core/dag_module.py", "snippet": "class Vertex:\n MAX_LEN_REPR = None\n\n def __init__(self, name: str):\n self.name = name\n\n def __repr__(self):\n if self.MAX_LEN_REPR is not None and len(self.name) > self.MAX_LEN_REPR:\n return f'{self.name[:self.MAX_LEN_REPR // 2]}...{self.name[-self.MAX_LEN_REPR // 2:]}'\n return self.name\n\n def config_dict(self, atomic_modules: List[torch.nn.Module]):\n return {\n 'name': self.name,\n }" }, { "identifier": "PASS_THROUGH_CHANNELS_CLASSES", "path": "torch_dag_algorithms/pruning/commons.py", "snippet": "PASS_THROUGH_CHANNELS_CLASSES = (\n smodules.ChannelAffineModule,\n smodules.NormalizeModule,\n smodules.LayerNormWithOptionalBias,\n smodules.TfBatchNorm1d,\n nn.BatchNorm2d,\n nn.MaxPool2d,\n nn.AvgPool2d,\n nn.AdaptiveAvgPool2d,\n nn.Dropout,\n nn.Upsample,\n nn.LayerNorm,\n nn.BatchNorm1d,\n MaskModule,\n smodules.PowerModule,\n smodules.AddcmulModule,\n smodules.HalfPixelCentersFalseBilinearUpsample,\n smodules.BilinearUpsampling,\n smodules.PadModule,\n smodules.NormalizeModule,\n smodules.InterpolateModule,\n smodules.ScalarMul,\n smodules.MeanModule,\n\n)" }, { "identifier": "is_source", "path": "torch_dag_algorithms/pruning/commons.py", "snippet": "def is_source(module: nn.Module):\n return is_linear_source(module) or is_conv_source(module)" }, { "identifier": "get_orbits_dict", "path": "torch_dag_algorithms/pruning/commons.py", "snippet": "def get_orbits_dict(dag) -> Dict:\n all_orbit_modules = set([v.module.orbit for v in dag.inner_vertices if isinstance(v.module, MaskModule)])\n return {orbit.name: orbit for orbit in all_orbit_modules}" }, { "identifier": "is_linear_source", "path": "torch_dag_algorithms/pruning/commons.py", "snippet": "def is_linear_source(module: nn.Module):\n if isinstance(module, nn.Linear):\n return True\n return False" }, { "identifier": "is_depthwise_conv", "path": "torch_dag_algorithms/pruning/commons.py", "snippet": "def is_depthwise_conv(module: nn.Module) -> bool:\n return isinstance(module, (\n nn.Conv2d, nn.ConvTranspose2d)) and module.in_channels == module.groups and module.in_channels > 1" 
}, { "identifier": "OrbitModule", "path": "torch_dag_algorithms/pruning/modules.py", "snippet": "class OrbitModule(torch.nn.Module):\n\n def __init__(\n self,\n name: str,\n num_channels: int,\n distillation_mode: str = constants.PRUNING_DEFAULT_MODE_NAME,\n block_size: Optional[int] = None,\n indices_of_source_vertices=None,\n ):\n super().__init__()\n self.name = name\n self.num_channels = num_channels\n self.distillation_mode = distillation_mode\n self.block_size = block_size\n self._testing_logits = None\n self.conv1 = torch.nn.Conv2d(\n in_channels=num_channels, out_channels=num_channels, kernel_size=3, groups=num_channels)\n self.conv2 = torch.nn.Conv2d(\n in_channels=num_channels,\n out_channels=num_channels,\n kernel_size=1,\n )\n self._optionally_set_block_size_for_whole_block_pruning(distillation_mode=distillation_mode)\n self._validate_distilation_mode_and_block_size(distillation_mode=distillation_mode, block_size=block_size)\n self.bkd_masking_losses = {}\n self.indices_of_source_vertices = indices_of_source_vertices\n self.debug_logits = None\n\n def _validate_distilation_mode_and_block_size(self, distillation_mode: str, block_size: int):\n if distillation_mode not in PRUNING_MODES:\n raise NotImplementedError(f'Distillation mode: {distillation_mode} not supported')\n if distillation_mode == constants.PRUNING_BLOCK_SNPE_MODE_NAME and block_size is None:\n raise AssertionError(f'In {constants.PRUNING_BLOCK_SNPE_MODE_NAME} pruning mode block size must not '\n f'be `None`.')\n\n def _optionally_set_block_size_for_whole_block_pruning(self, distillation_mode: str):\n if distillation_mode == constants.PRUNING_WHOLE_BLOCK_MODE_NAME:\n self.block_size = self.num_channels\n\n @staticmethod\n def clip_logits(\n logits: torch.Tensor,\n clip_val=constants.MAX_LOGITS_ABS_VALUE,\n ) -> torch.Tensor:\n return torch.clip(logits, min=-clip_val, max=clip_val)\n\n @property\n def logits(self) -> torch.Tensor:\n # TODO This is a hack for testing, remove/refactor it\n if self.debug_logits is not None:\n return self.debug_logits\n kernel_size = self.conv1.kernel_size\n device = self.conv1.weight.device\n x = torch.ones(size=(1, self.num_channels, *kernel_size), device=device)\n x = self.conv1(x)\n x = self.conv2(x)\n x = (constants.INITIAL_LOGITS_VALUE_FOR_PRUNING + constants.SIMPLE_ORBIT_LOGITS_MULTIPLIER * x)\n return self.clip_logits(torch.mean(x, dim=(0, 2, 3), keepdim=False))\n\n def compute_average_number_of_output_channels(self):\n if self.distillation_mode == constants.PRUNING_DEFAULT_MODE_NAME:\n return torch.sigmoid(self.logits).sum()\n\n elif self.distillation_mode in (\n constants.PRUNING_BLOCK_SNPE_MODE_NAME, constants.PRUNING_WHOLE_BLOCK_MODE_NAME):\n split_list = get_split_list_of_logits(logits=self.logits, block_size=self.block_size)\n max_per_block_logits = get_sorted_per_block_max_logits(\n logits=self.logits,\n block_size=self.block_size,\n )\n num_channels = torch.stack(\n [float(block_size) * torch.sigmoid(max_logit) for \\\n block_size, max_logit in zip(split_list, max_per_block_logits)], dim=0).sum()\n return num_channels\n else:\n msg = f'Mode {self.distillation_mode} not implemented for average channels computation.'\n raise NotImplementedError(msg)\n\n def compute_output_channel_masks(\n self,\n predecessors_channel_masks: List[List[torch.Tensor]] = None,\n ) -> List[torch.Tensor]:\n predecessors_channel_masks = [mask_list for mask_list in predecessors_channel_masks if mask_list is not None]\n logits = self.logits\n num_logits = int(logits.shape[0])\n if 
self.distillation_mode == constants.PRUNING_DEFAULT_MODE_NAME:\n scores_ = torch.where(\n logits > 0.0,\n 1,\n 0,\n )\n elif self.distillation_mode == constants.PRUNING_WHOLE_BLOCK_MODE_NAME:\n max_logits_per_block = get_sorted_per_block_max_logits(\n logits=logits,\n block_size=self.block_size,\n )\n max_logits_per_block_tensor = torch.stack(max_logits_per_block)\n indices_of_blocks_to_leave = np.where(max_logits_per_block_tensor > 0.)[0]\n if len(indices_of_blocks_to_leave) == 1:\n scores_ = np.ones(shape=(self.block_size,), dtype=np.int32)\n else:\n scores_ = np.zeros(shape=(self.block_size,), dtype=np.int32)\n\n elif self.distillation_mode == constants.PRUNING_BLOCK_SNPE_MODE_NAME:\n max_logits_per_block = get_sorted_per_block_max_logits(\n logits=logits,\n block_size=self.block_size,\n )\n max_logits_per_block_tensor = torch.stack(max_logits_per_block)\n indices_of_blocks_to_leave = np.where(max_logits_per_block_tensor > 0.)[0]\n if len(indices_of_blocks_to_leave) == 0:\n # removing whole orbit\n scores_ = np.zeros(shape=(self.num_channels,), dtype=np.int32)\n\n else:\n # compute block indices that are left\n sorted_logits = torch.sort(logits, descending=True)[0]\n split_list = get_split_list_of_logits(logits=logits, block_size=self.block_size)\n split_sorted_logits = list(torch.split(sorted_logits, split_list))\n residual = num_logits % self.block_size\n if residual != 0:\n logits_fake_tail = split_sorted_logits[-1].mean() * torch.ones(\n size=(self.block_size - residual,))\n split_sorted_logits[-1] = torch.cat([split_sorted_logits[-1], logits_fake_tail], dim=0)\n split_sorted_logits = [e.detach().numpy() for e in split_sorted_logits]\n if len(split_sorted_logits) == 1:\n res = split_sorted_logits\n else:\n res = np.take(\n split_sorted_logits,\n axis=0,\n indices=indices_of_blocks_to_leave,\n )\n threshold_value = torch.tensor(res).min()\n scores_ = np.where(\n logits >= threshold_value,\n 1,\n 0,\n )\n else:\n raise NotImplementedError\n\n if len(predecessors_channel_masks) == 0:\n return [torch.tensor(scores_)]\n else:\n return [torch.tensor(np.where(\n predecessors_channel_masks[0][0].sum() == 0,\n np.array([0] * self.num_channels, dtype=np.int32),\n scores_,\n ))]\n\n def sample(self):\n return sample_from_logits(logits=self.logits)" }, { "identifier": "compute_timm_average_num_channels", "path": "torch_dag_timm_plugin/module_multipliers.py", "snippet": "@singledispatch\ndef compute_timm_average_num_channels(\n module: torch.nn.Module,\n vertex: InnerVertex,\n average_number_input_channels: List[List[torch.Tensor]],\n orbits_dict: Dict[str, OrbitModule],\n forward_dict: Dict[Vertex, Union[torch.Tensor, List[torch.Tensor]]]\n) -> Union[List[torch.Tensor], None]:\n raise NotImplementedError" }, { "identifier": "CUSTOM_AVERAGE_CHANNELS_TIMM_CLASSES", "path": "torch_dag_timm_plugin/module_multipliers.py", "snippet": "CUSTOM_AVERAGE_CHANNELS_TIMM_CLASSES = ()" } ]
import logging
import torch
from typing import List, Tuple, Dict, Union
from torch_dag import structured_modules as smodules
from torch_dag.core.dag_module import DagModule
from torch_dag.core.dag_module import InputVertex, InnerVertex, Vertex
from torch_dag_algorithms.pruning.commons import PASS_THROUGH_CHANNELS_CLASSES
from torch_dag_algorithms.pruning.commons import is_source, get_orbits_dict, is_linear_source, is_depthwise_conv
from torch_dag_algorithms.pruning.modules import OrbitModule
from torch_dag_timm_plugin.module_multipliers import compute_timm_average_num_channels, \
    CUSTOM_AVERAGE_CHANNELS_TIMM_CLASSES
12103
#
# Copyright © TCL Research Europe. All rights reserved.
#
logger = logging.getLogger(__name__)

PASS_THROUGH_MULTIPLIER_CLASSES = PASS_THROUGH_CHANNELS_CLASSES


def shape_to_float(shape, device, dim=1):
    return torch.tensor(shape[dim], device=device).to(torch.float32)


def compute_elementwise_op_average_channels(average_number_input_channels: List[List[torch.Tensor]], ):
    average_number_input_channels = [e for e in average_number_input_channels if e is not None]
    if len(average_number_input_channels) == 0:
        return None
    return [torch.max(torch.stack([e[0] for e in average_number_input_channels]))]


def compute_average_num_channels(
        vertex: InnerVertex,
        average_number_input_channels: List[List[torch.Tensor]],
        orbits_dict: Dict[str, OrbitModule],
        forward_dict: Dict[Vertex, Union[torch.Tensor, List[torch.Tensor]]]
) -> Union[List[torch.Tensor], None]:
    device = forward_dict[vertex.dag_module.input_vertices[0]].device
    if isinstance(vertex.module, PASS_THROUGH_MULTIPLIER_CLASSES):
        return [average_number_input_channels[0][0]]
    if is_source(vertex.module):
        if vertex.orbit is not None:
            orbit_module = orbits_dict[vertex.orbit]
            return [orbit_module.compute_average_number_of_output_channels()]
        else:
            if is_linear_source(vertex.module):
                return [shape_to_float(forward_dict[vertex].shape, dim=-1, device=device)]
            else:
                return [shape_to_float(forward_dict[vertex].shape, device=device)]
    elif is_depthwise_conv(vertex.module):
        return [average_number_input_channels[0][0]]
    elif isinstance(vertex.module, (smodules.AddModule, smodules.SubModule, smodules.MulModule)):
        return compute_elementwise_op_average_channels(average_number_input_channels)
    elif isinstance(vertex.module, smodules.ConcatModule):
        return [torch.stack([x[0] for x in average_number_input_channels]).sum()]
    elif isinstance(vertex.module, smodules.ChunkModule):
        assert vertex.module.dim == 1
        channels = average_number_input_channels[0][0]
        return [channels / vertex.module.chunks for _ in range(vertex.module.chunks)]
    elif isinstance(vertex.module, smodules.ParameterModule):
        # the heuristic here is that the channel dim will be the axis with max shape
        max_shape = max(forward_dict[vertex].shape)
        return [torch.tensor(max_shape, device=device).to(torch.float32)]
    elif isinstance(vertex.module, smodules.TfTokenizeModule):
        return [shape_to_float(forward_dict[vertex].shape, dim=2, device=device)]
    elif isinstance(vertex.module, smodules.TfDetokenizeModule):
        return [shape_to_float(forward_dict[vertex].shape, dim=1, device=device)]
#
# Copyright © TCL Research Europe. All rights reserved.
#
logger = logging.getLogger(__name__)

PASS_THROUGH_MULTIPLIER_CLASSES = PASS_THROUGH_CHANNELS_CLASSES


def shape_to_float(shape, device, dim=1):
    return torch.tensor(shape[dim], device=device).to(torch.float32)


def compute_elementwise_op_average_channels(average_number_input_channels: List[List[torch.Tensor]], ):
    average_number_input_channels = [e for e in average_number_input_channels if e is not None]
    if len(average_number_input_channels) == 0:
        return None
    return [torch.max(torch.stack([e[0] for e in average_number_input_channels]))]


def compute_average_num_channels(
        vertex: InnerVertex,
        average_number_input_channels: List[List[torch.Tensor]],
        orbits_dict: Dict[str, OrbitModule],
        forward_dict: Dict[Vertex, Union[torch.Tensor, List[torch.Tensor]]]
) -> Union[List[torch.Tensor], None]:
    device = forward_dict[vertex.dag_module.input_vertices[0]].device
    if isinstance(vertex.module, PASS_THROUGH_MULTIPLIER_CLASSES):
        return [average_number_input_channels[0][0]]
    if is_source(vertex.module):
        if vertex.orbit is not None:
            orbit_module = orbits_dict[vertex.orbit]
            return [orbit_module.compute_average_number_of_output_channels()]
        else:
            if is_linear_source(vertex.module):
                return [shape_to_float(forward_dict[vertex].shape, dim=-1, device=device)]
            else:
                return [shape_to_float(forward_dict[vertex].shape, device=device)]
    elif is_depthwise_conv(vertex.module):
        return [average_number_input_channels[0][0]]
    elif isinstance(vertex.module, (smodules.AddModule, smodules.SubModule, smodules.MulModule)):
        return compute_elementwise_op_average_channels(average_number_input_channels)
    elif isinstance(vertex.module, smodules.ConcatModule):
        return [torch.stack([x[0] for x in average_number_input_channels]).sum()]
    elif isinstance(vertex.module, smodules.ChunkModule):
        assert vertex.module.dim == 1
        channels = average_number_input_channels[0][0]
        return [channels / vertex.module.chunks for _ in range(vertex.module.chunks)]
    elif isinstance(vertex.module, smodules.ParameterModule):
        # the heuristic here is that the channel dim will be the axis with max shape
        max_shape = max(forward_dict[vertex].shape)
        return [torch.tensor(max_shape, device=device).to(torch.float32)]
    elif isinstance(vertex.module, smodules.TfTokenizeModule):
        return [shape_to_float(forward_dict[vertex].shape, dim=2, device=device)]
    elif isinstance(vertex.module, smodules.TfDetokenizeModule):
        return [shape_to_float(forward_dict[vertex].shape, dim=1, device=device)]
elif isinstance(vertex.module, CUSTOM_AVERAGE_CHANNELS_TIMM_CLASSES):
12
2023-11-17 15:36:44+00:00
16k
newcastleuniversity/DISPEL
dispel/processing/epochs.py
[ { "identifier": "Reading", "path": "dispel/data/core.py", "snippet": "class Reading(FlagMixIn):\n \"\"\"A data capture from an experiment.\n\n Attributes\n ----------\n evaluation\n The evaluation information for this reading\n session\n The session information for this reading\n measure_set\n A list of measures already processed on the device\n schema\n The schema of the reading\n date\n The time the reading was recorded\n device\n The device that captured the reading\n\n Parameters\n ----------\n evaluation\n The evaluation information for this reading\n session\n The session information for this reading\n levels\n An iterable of Level\n measure_set\n A list of measures already processed on the device\n schema\n The schema of the reading\n date\n The time the reading was recorded\n device\n The device that captured the reading\n \"\"\"\n\n def __init__(\n self,\n evaluation: Evaluation,\n session: Optional[Session] = None,\n levels: Optional[Iterable[Level]] = None,\n measure_set: Optional[MeasureSet] = None,\n schema: Optional[ReadingSchema] = None,\n date: Any = None,\n device: Optional[Device] = None,\n ):\n super().__init__()\n self.evaluation = evaluation\n self.session = session\n self.measure_set: MeasureSet = measure_set or MeasureSet()\n self.schema = schema\n self.date = pd.Timestamp(date) if date else None\n self.device = device\n self._attempt: Dict[str, int] = defaultdict(int)\n\n # verify time frame compatibility\n if (\n self.session\n and not self.session.is_incomplete\n and not self.session.contains(self.evaluation)\n ):\n raise ValueError(\"Evaluation start and end must be within session\")\n\n # create dictionary of levels\n self._levels: Dict[LevelId, Level] = {}\n\n # set level if arg is provided\n if levels:\n for level in levels:\n self.set(level)\n\n def get_level(self, level_id: Optional[LevelIdType] = None) -> Level:\n \"\"\"Get level for a given level_id.\n\n Parameters\n ----------\n level_id\n The id identifying the level.\n\n Returns\n -------\n Level\n The level identified by ``level_id``. If no level id is provided and the\n reading contains only one level it will be returned. 
Otherwise, the function\n will raise a :class:`ValueError`.\n\n Raises\n ------\n ValueError\n If the given id does not match any existing level within the reading.\n ValueError\n If no id has been provided, and there are multiple levels withing the\n reading.\n \"\"\"\n # check if an arg is provided\n if level_id:\n if isinstance(level_id, str):\n level_id = LevelId.from_str(level_id) # type: ignore\n # check that this is a correct id\n if level_id not in self._levels:\n raise ValueError(\n f\"{level_id=} does not match any Level in {self._levels.keys()}\"\n )\n return self._levels[level_id] # type: ignore\n\n # if no level_id provided, check if there is only one level\n if len(self._levels) == 1:\n return next(iter(self._levels.values()))\n\n # if not, ask user for a level_id\n raise ValueError(\n f\"There are {len(self._levels)} levels, please provide a level_id in\"\n f\" {self._levels.keys()}\"\n )\n\n def __repr__(self) -> str:\n return f'<Reading: {plural(\"level\", len(self))} ({self.flag_count_repr})>'\n\n def __iter__(self) -> Iterable[Tuple[LevelIdType, Level]]:\n yield from self._levels.items()\n\n def __len__(self) -> int:\n return len(self._levels)\n\n @property\n def empty(self) -> bool:\n \"\"\"Check whether the reading is empty.\"\"\"\n return len(self) == 0\n\n @property\n def levels(self) -> ValuesView[Level]:\n \"\"\"Get a list of all Level in the reading.\"\"\"\n return self._levels.values()\n\n @property\n def level_ids(self) -> List[LevelId]:\n \"\"\"Get the list of level_id keys.\"\"\"\n return [level.id for level in self._levels.values()]\n\n def has_raw_data_set(\n self,\n data_set_id: str,\n level_id: LevelIdType,\n ) -> bool:\n \"\"\"Check whether the reading contains the desired raw data set.\n\n Parameters\n ----------\n data_set_id\n The id of the raw data set that will be searched for.\n level_id\n The level id in which the raw data set is to searched for.\n\n Returns\n -------\n bool\n ``True`` if the raw data set exists inside the given level. 
``False``\n otherwise.\n \"\"\"\n return self.get_level(level_id).has_raw_data_set(data_set_id)\n\n def get_raw_data_set(\n self,\n data_set_id: str,\n level_id: LevelIdType,\n ) -> RawDataSet:\n \"\"\"Get the raw data set for a given data set id and a level.\n\n Parameters\n ----------\n data_set_id\n The id of the raw data set that will be retrieved.\n level_id\n The level id from which the raw data set is to retrieved.\n\n Returns\n -------\n RawDataSet\n The raw data set with the matching id.\n \"\"\"\n return self.get_level(level_id).get_raw_data_set(data_set_id)\n\n def get_measure_set(self, level_id: Optional[LevelIdType] = None) -> MeasureSet:\n \"\"\"Get measure_set from level identified with level_id.\"\"\"\n if not level_id:\n return self.measure_set\n return self.get_level(level_id).measure_set\n\n def get_merged_measure_set(self) -> MeasureSet:\n \"\"\"Get a measure set containing all the reading's measure values.\"\"\"\n return sum(\n (self.measure_set, *(level.measure_set for level in self.levels)),\n MeasureSet(),\n )\n\n @singledispatchmethod\n def set(self, value, **kwargs):\n \"\"\"Set a value inside a reading.\"\"\"\n raise TypeError(f\"Unsupported set type: {type(value)}\")\n\n def _get_level(self, level: Optional[Union[LevelIdType, Level]] = None) -> Level:\n \"\"\"Get level from id or level itself.\"\"\"\n if isinstance(level, Level):\n return level\n return self.get_level(level)\n\n @set.register(MeasureSet)\n def _measure_set(\n self,\n value: MeasureSet,\n level: Optional[Union[LevelIdType, Level]] = None,\n ):\n if level is None:\n self.measure_set += value\n else:\n self._get_level(level).set(value)\n\n @set.register(MeasureValue)\n def _measure_value(\n self,\n value: MeasureValue,\n level: Optional[Union[LevelIdType, Level]] = None,\n epoch: Optional[LevelEpoch] = None,\n ):\n if epoch is not None:\n epoch.set(value)\n else:\n if level is None:\n measure_set = self.measure_set\n else:\n measure_set = self._get_level(level).measure_set\n\n measure_set.set(value)\n\n @set.register(RawDataSet)\n def _raw_data_set(\n self,\n value: RawDataSet,\n level: Union[LevelIdType, Level],\n concatenate: bool = False,\n overwrite: bool = False,\n ):\n self._get_level(level).set(value, concatenate=concatenate, overwrite=overwrite)\n\n @set.register(LevelEpoch)\n def _epoch_measure_set(self, value: LevelEpoch, level: Union[LevelIdType, Level]):\n self._get_level(level).set(value)\n\n @set.register(Level)\n def _level(self, value: Level):\n \"\"\"Set a level.\"\"\"\n level_id_str = str(value.id)\n for lev in self._levels:\n if str(lev).startswith(level_id_str) and level_id_str in self._attempt:\n self._attempt[level_id_str] += 1\n break\n if level_id_str not in self._attempt:\n new_level = LevelId.from_str(level_id_str)\n self._levels[new_level] = value # type: ignore\n self._attempt[str(new_level.id)] = 1\n else:\n new_level_id_str = \"-\".join(\n [level_id_str, str(self._attempt[level_id_str]).zfill(2)]\n )\n value.id = cast(LevelId, LevelId.from_str(new_level_id_str))\n self._levels[value.id] = value\n # TODO: use sorting by effective time frame to ensure orders to\n # attempts :\n # level_ids = sorted(level_ids, key=lambda x:\n # reading.get_level(x).effective_time_frame.start )\n self._levels[value.id].context.set(\n value=self._attempt[level_id_str],\n definition=ValueDefinition(\n id_=\"attempt\", name=f\"The attempt number: {self._attempt[level_id_str]}\"\n ),\n )\n\n @set.register(Flag)\n def _set_flag(self, value: Flag):\n self.add_flag(value)" }, { "identifier": 
"Epoch", "path": "dispel/data/epochs.py", "snippet": "class Epoch(FlagMixIn):\n \"\"\"An epoch marking a specific time point or period.\n\n Parameters\n ----------\n start\n The beginning of the epoch.\n end\n An optional end of the epoch. If no end is provided, the epoch end will be\n considered in the future and the :data:`Epoch.is_incomplete` property will be\n `True`.\n definition\n An optional definition of the epoch.\n \"\"\"\n\n def __init__(\n self,\n start: Any,\n end: Any,\n definition: Optional[EpochDefinition] = None,\n ):\n super().__init__()\n\n self.start = start\n self.end = end\n self.definition = definition\n\n @property\n def start(self) -> pd.Timestamp:\n \"\"\"Get the beginning of the epoch.\n\n Returns\n -------\n pandas.Timestamp\n The beginning of the epoch.\n \"\"\"\n return self._start\n\n @start.setter\n def start(self, value: Union[int, float, str, datetime, datetime64]):\n \"\"\"Set the beginning of the epoch.\n\n Parameters\n ----------\n value\n The start of the epoch.\n\n Raises\n ------\n ValueError\n Risen if the provided value is null.\n \"\"\"\n self._start = pd.Timestamp(value)\n if pd.isnull(self.start):\n raise ValueError(\"Start date cannot be null\")\n\n @property\n def end(self) -> Optional[pd.Timestamp]:\n \"\"\"Get the end of the epoch.\n\n Returns\n -------\n pandas.Timestamp\n The end of the epoch. `None`, if the epoch end has not been observed (i.e.,\n was not set).\n \"\"\"\n return self._end\n\n @end.setter\n def end(self, value: Optional[Union[int, float, str, datetime, datetime64]]):\n \"\"\"Set the end of the epoch.\n\n Parameters\n ----------\n value\n The end of the epoch. If `None` is provided, the epoch end is considered to\n be in the future and :data:`Epoch.is_incomplete` is ``True``.\n\n Raises\n ------\n ValueError\n If the `start` is after the `end`.\n \"\"\"\n self._end = pd.Timestamp(value)\n if self.start > self.end:\n raise ValueError(f\"Start cannot be after end: {self.start} > {self.end}\")\n\n @property\n def id(self) -> DefinitionId:\n \"\"\"Get the ID from the definition of the epoch.\n\n Returns\n -------\n DefinitionId\n The id of the :data:`Epoch.definition`.\n\n Raises\n ------\n AttributeError\n Will be risen if no definition was set for the epoch.\n \"\"\"\n if self.definition is None:\n raise AttributeError(\"No definition was provided for epoch\")\n return self.definition.id\n\n def __hash__(self):\n return hash((self.id, self.start, self.end))\n\n def __repr__(self):\n return f\"<{self.__class__.__name__}: {self.start} - {self.end}>\"\n\n def _test_overlap_contain(\n self,\n other: Union[\"Epoch\", datetime, pd.Timestamp],\n method: Callable[[Iterable[bool]], bool],\n ) -> bool:\n if isinstance(other, Epoch):\n return method((self.overlaps(other.start), self.overlaps(other.end)))\n\n assert self.end is not None, \"Can only test with closed epochs\"\n if isinstance(other, (datetime, pd.Timestamp)):\n return self.start <= other <= self.end\n\n raise ValueError(\"Can only test for datetime or Epoch values\")\n\n @property\n def duration(self) -> pd.Timedelta:\n \"\"\"Get the duration of the epoch.\n\n Returns\n -------\n pandas.Timedelta\n The duration of the epoch.\n\n Raises\n ------\n ValueError\n If the epoch has no end.\n \"\"\"\n if self.is_incomplete:\n raise ValueError(\"Cannot retrieve duration for incomplete epochs\")\n return self.end - self.start\n\n @property\n def is_incomplete(self) -> bool:\n \"\"\"Check if the epoch has an end date.\n\n An epoch is considered incomplete if it does not have an end 
date time.\n\n Returns\n -------\n bool\n `True` if the end date time is unknown. Otherwise, `False`.\n\n \"\"\"\n return pd.isnull(self.end)\n\n def overlaps(self, other: Union[\"Epoch\", datetime, pd.Timestamp]) -> bool:\n \"\"\"Test if `other` overlaps with this epoch.\n\n Parameters\n ----------\n other\n The other epoch or datetime-like object to be tested.\n\n Returns\n -------\n bool\n If an epoch is provided ``overlap`` will be ``True`` if either the ``start``\n or ``end`` of the ``other`` epoch is within the ``start`` or ``end`` of\n this epoch. If only a datetime object is provided, the result is ``True`` if\n the time is between ``start`` and ``end`` including the boundaries.\n \"\"\"\n return self._test_overlap_contain(other, any)\n\n def contains(self, other: Union[\"Epoch\", datetime, pd.Timestamp]) -> bool:\n \"\"\"Test if ``other`` is contained within this epoch.\n\n Parameters\n ----------\n other\n The other epoch or datetime-like object to be tested.\n\n Returns\n -------\n bool\n If an epoch is provided ``contains`` will be ``True`` if both the ``start``\n and ``end`` of the ``other`` epoch is within the ``start`` and ``end`` of\n this epoch. If only a datetime object is provided, the result is ``True`` if\n the time is between ``start`` and ``end`` including the boundaries.\n \"\"\"\n return self._test_overlap_contain(other, all)" }, { "identifier": "EpochDefinition", "path": "dispel/data/epochs.py", "snippet": "class EpochDefinition:\n \"\"\"The definition of an epoch.\n\n Parameters\n ----------\n id_\n The identifier of the epoch. This identifier does not have to be unique\n across multiple epochs and can serve as a type of epoch.\n name\n An optional plain-text name of the epoch definition.\n description\n A detailed description of the epoch providing additional resolution beyond\n the ``name`` property.\n\n Attributes\n ----------\n name\n An optional plain-text name of the epoch definition.\n description\n A detailed description of the epoch providing additional resolution beyond\n the ``name`` property.\n \"\"\"\n\n def __init__(\n self,\n id_: Union[str, DefinitionId],\n name: Optional[str] = None,\n description: Optional[str] = None,\n ):\n self.id = id_ # type: ignore\n self.name = name\n self.description = description\n\n @property\n def id(self) -> DefinitionId:\n \"\"\"Get the ID of the definition.\n\n Returns\n -------\n DefinitionId\n The ID of the epoch definition.\n \"\"\"\n return self._id\n\n @id.setter\n def id(self, value: Union[str, DefinitionId]):\n \"\"\"Set the ID of the definition.\n\n Parameters\n ----------\n value\n The ID of the definition. 
The ID has to be unique with respect to the\n time points of the :class:`Epoch`, i.e., if an epoch has the same ID,\n start, and end, it is considered equal.\n \"\"\"\n if not isinstance(value, DefinitionId):\n value = DefinitionId(value)\n self._id = value" }, { "identifier": "Level", "path": "dispel/data/levels.py", "snippet": "class Level(Epoch):\n \"\"\"An entity to separate sub-task inside each test (Levels).\n\n FIXME: DOC\n\n Attributes\n ----------\n context\n Contextual information about the level\n measure_set\n A :class:'~dispel.data.measures.MeasureSet' of a given Level\n\n Parameters\n ----------\n id_\n The identifier of a given Level.\n start\n The timestamp of the beginning of the level\n end\n The timestamp of the end of the level\n context\n Contextual information about the level\n raw_data_sets\n An iterable of :class:'~dispel.data.raw.RawDataSet' of a given Level\n measure_set\n A :class:'~dispel.data.measures.MeasureSet' of a given Level\n epochs\n An iterable of :class:`~dispel.data.measures.EpochMeasureSet` to be added to the\n level.\n \"\"\"\n\n def __init__(\n self,\n id_: Union[str, List[str], LevelId],\n start: Any,\n end: Any,\n context: Optional[Context] = None,\n raw_data_sets: Optional[Iterable[RawDataSet]] = None,\n measure_set: Optional[MeasureSet] = None,\n epochs: Optional[Iterable[LevelEpoch]] = None,\n ):\n if not isinstance(id_, LevelId):\n id_ = LevelId(id_)\n\n definition = EpochDefinition(id_=id_)\n super().__init__(start=start, end=end, definition=definition)\n\n self.context = context or Context()\n self.measure_set = measure_set or MeasureSet()\n\n # create dictionary of raw data sets\n self._raw_data_sets: Dict[str, RawDataSet] = {}\n\n # set raw data sets if arg is provided\n if raw_data_sets:\n for raw_data_set in raw_data_sets:\n self.set(raw_data_set)\n\n # create data frame for each epoch\n self._epochs = pd.DataFrame(columns=[\"definition_id\", \"start\", \"end\", \"epoch\"])\n if epochs:\n for epoch in epochs:\n self.set(epoch)\n\n @property\n def id(self) -> LevelId:\n \"\"\"Get the ID of the level from its definition.\n\n Returns\n -------\n LevelId\n The ID of the definition provided via `definition`.\n \"\"\"\n assert self.definition is not None, \"Require definition to access id\"\n return cast(LevelId, self.definition.id)\n\n @id.setter\n def id(self, value: Union[str, DefinitionId]):\n \"\"\"Set the ID of the level's definition.\n\n Parameters\n ----------\n value\n The ID to be set.\n \"\"\"\n assert self.definition is not None, \"Require definition to set id\"\n self.definition.id = value # type: ignore\n\n def __hash__(self):\n return hash(self.id)\n\n def __repr__(self):\n return f\"<Level: {self.id} ({self.flag_count_repr})>\"\n\n @property\n def raw_data_sets(self) -> List[RawDataSet]:\n \"\"\"Get all raw data sets.\"\"\"\n return list(self._raw_data_sets.values())\n\n def has_raw_data_set(self, id_: str) -> bool:\n \"\"\"Return ``True`` if the level contains the desired raw data set.\"\"\"\n return id_ in self._raw_data_sets\n\n def get_raw_data_set(self, id_: str) -> RawDataSet:\n \"\"\"Get the raw data set for a given data set id.\n\n Parameters\n ----------\n id_\n The id of the raw data set to be returned\n\n Returns\n -------\n RawDataSet\n The raw data set with the matching id\n\n Raises\n ------\n ValueError\n If the given id does not correspond to any existing raw data set within the\n level.\n \"\"\"\n if id_ not in self._raw_data_sets:\n raise ValueError(\n f'Unknown data set with id: \"{id_}\" for level_id == 
\"{self.id}\" '\n f\"please provide an id within {list(self._raw_data_sets.keys())}\"\n )\n\n return self._raw_data_sets[id_]\n\n @property\n def epochs(self) -> List[LevelEpoch]:\n \"\"\"Get all epoch measure sets.\"\"\"\n return self._epochs[\"epoch\"].tolist()\n\n @singledispatchmethod\n def set(self, value, **kwargs):\n \"\"\"Set a value inside a level.\"\"\"\n raise TypeError(f\"Unsupported set type: {type(value)}\")\n\n @set.register(MeasureSet)\n def _set_measure_set(self, value: MeasureSet):\n self.measure_set += value\n\n @set.register(MeasureValue)\n def _set_measure_value(self, value: MeasureValue):\n self.measure_set.set(value)\n\n @set.register(RawDataSet)\n def _set_raw_data_set(\n self, value: RawDataSet, concatenate: bool = False, overwrite: bool = False\n ):\n if overwrite and concatenate:\n raise ValueError(\n \"You cannot both concatenate and overwrite an existing raw data set. \"\n \"Only one of these arguments must be set to ``True``.\"\n )\n\n if (id_ := value.id) in self._raw_data_sets: # pylint: disable=all\n if concatenate:\n value = value.concat(self.get_raw_data_set(id_))\n elif not overwrite:\n raise RawDataSetAlreadyExists(\n id_, self.id, \"Use overwrite=True to overwrite\"\n )\n\n self._raw_data_sets[id_] = value\n\n @set.register(LevelEpoch)\n def _set_epoch(self, value: LevelEpoch):\n new_index = len(self._epochs)\n self._epochs.loc[new_index] = pd.Series(\n dict(\n definition_id=value.id if value.definition else None,\n start=value.start,\n end=value.end,\n epoch=value,\n )\n )\n\n @set.register(Flag)\n def _set_flag(self, value: Flag):\n self.add_flag(value)" }, { "identifier": "LevelEpoch", "path": "dispel/data/levels.py", "snippet": "class LevelEpoch(Epoch, MeasureSet):\n \"\"\"Level specific epoch with measures.\n\n This data model allows to store measures that are specific to a given time point\n during an evaluation.\n \"\"\"\n\n VALUE_CLS = LevelEpochMeasureValue" }, { "identifier": "LevelEpochMeasureValue", "path": "dispel/data/levels.py", "snippet": "class LevelEpochMeasureValue(MeasureValue):\n \"\"\"A measure value for a specific epoch.\n\n Parameters\n ----------\n epoch\n The epoch for which the measure value was extracted.\n\n Attributes\n ----------\n epoch\n The epoch for which the measure value was extracted.\n \"\"\"\n\n def __init__(self, epoch: \"LevelEpoch\", *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.epoch = epoch\n\n def __hash__(self):\n return hash((self.definition, self.value, self.epoch))\n\n def to_dict(self, stringify: bool = False) -> Dict[str, Optional[Any]]:\n \"\"\"Get a dictionary representation of measure information.\n\n Parameters\n ----------\n stringify\n ``True`` if all dictionary values are converted to strings. 
``False``\n otherwise.\n\n Returns\n -------\n Dict[str, Optional[Any]]\n A dictionary summarizing measure value and epoch information.\n \"\"\"\n res = super().to_dict(stringify=stringify)\n\n if self.epoch.definition is not None:\n res[\"epoch_id\"] = self.epoch.definition.id\n res[\"epoch_name\"] = self.epoch.definition.name\n\n if stringify:\n res[\"epoch_start\"] = self._to_string(self.epoch.start)\n res[\"epoch_end\"] = self._to_string(self.epoch.end)\n else:\n res[\"epoch_start\"] = self.epoch.start\n res[\"epoch_end\"] = self.epoch.end\n\n return res" }, { "identifier": "MeasureValue", "path": "dispel/data/measures.py", "snippet": "class MeasureValue(FlagMixIn, Value):\n \"\"\"A measure value.\"\"\"\n\n def __repr__(self):\n return (\n f\"<MeasureValue ({self.definition}): {self.value} \"\n f\"({self.flag_count_repr})>\"\n )\n\n @staticmethod\n def _to_string(value):\n return \"\" if value is None else str(value)\n\n def to_dict(self, stringify: bool = False) -> Dict[str, Optional[Any]]:\n \"\"\"Get a dictionary representation of measure information.\n\n Parameters\n ----------\n stringify\n ``True`` if all dictionary values are converted to strings. ``False``\n otherwise.\n\n Returns\n -------\n Dict[str, Optional[Any]]\n A dictionary summarizing measure value information.\n \"\"\"\n measure_min, measure_max = None, None\n if isinstance(self.definition.validator, RangeValidator):\n measure_min = self.definition.validator.lower_bound\n measure_max = self.definition.validator.upper_bound\n\n if stringify:\n value = str(self.value)\n measure_min = self._to_string(measure_min)\n measure_max = self._to_string(measure_max)\n else:\n value = self.value\n\n return dict(\n measure_id=str(self.id),\n measure_name=self.definition.name,\n measure_value=value,\n measure_unit=self.definition.unit,\n measure_type=self.definition.data_type,\n measure_min=measure_min,\n measure_max=measure_max,\n )" }, { "identifier": "RawDataSet", "path": "dispel/data/raw.py", "snippet": "class RawDataSet(FlagMixIn):\n \"\"\"A raw data set.\n\n Parameters\n ----------\n definition\n The definition of the raw data set\n data\n The data set\n \"\"\"\n\n def __init__(self, definition: RawDataSetDefinition, data: pd.DataFrame):\n super().__init__()\n self.definition = definition\n self.data = data\n\n precision_exists = any(\n [d.precision is not None for d in self.definition.value_definitions]\n )\n if precision_exists:\n # if precision exists then store the original data prior to any rounding\n self.raw_data = data\n\n def_ids = {d.id for d in self.definition.value_definitions if not d.is_index}\n data_ids = set(data.columns)\n\n diff_data_columns = data_ids - def_ids\n if diff_data_columns:\n raise ValueError(f\"Missing definition for column(s): {diff_data_columns}\")\n\n diff_def_ids = def_ids - data_ids\n if diff_def_ids:\n raise ValueError(f\"Missing columns for definition(s): {diff_def_ids}\")\n\n # for each column definition check if precision exists and apply it to the data\n for col_def in self.definition.value_definitions:\n if col_def.precision is not None:\n self.data[col_def.id.id] = round(\n self.data[col_def.id.id], ndigits=col_def.precision\n )\n\n @property\n def id(self) -> str:\n \"\"\"Get the identifier from the definition of the raw data set.\"\"\"\n return self.definition.id\n\n def __repr__(self) -> str:\n return f\"<RawDataSet: {self.id} ({self.flag_count_repr})>\"\n\n def concat(self, other: \"RawDataSet\") -> \"RawDataSet\":\n \"\"\"Concatenate two raw data sets.\"\"\"\n if self.definition 
!= other.definition:\n raise ValueError(\"Can only concatenate data sets with equal definitions\")\n return RawDataSet(self.definition, pd.concat([self.data, other.data]))" }, { "identifier": "RawDataSetDefinition", "path": "dispel/data/raw.py", "snippet": "class RawDataSetDefinition:\n \"\"\"The definition of a raw data set.\"\"\"\n\n #: The identifier of the raw data set definition\n id: str\n #: The source of the raw data set\n source: RawDataSetSource\n value_definitions_list: InitVar[Iterable[RawDataValueDefinition]]\n is_computed: bool = False\n \"\"\"`True` if the raw data source is computed. ``False`` if it is a measured\n source without transformation, e.g. acceleration recorded from the low\n level APIs.\"\"\"\n _value_definitions: Dict[DefinitionId, ValueDefinition] = field(init=False)\n\n def __post_init__(self, value_definitions_list):\n self._value_definitions = _create_value_definition_dict(value_definitions_list)\n\n @property\n def value_definitions(self):\n \"\"\"Get the value definitions of the raw data set.\"\"\"\n return self._value_definitions.values()\n\n def get_value_definition(self, id_: DefinitionId):\n \"\"\"Get a value definition.\"\"\"\n return self._value_definitions[id_]\n\n def __hash__(self):\n return hash(self.id)\n\n def __eq__(self, other):\n return (\n isinstance(other, RawDataSetDefinition)\n and self.id == other.id\n and self.source == other.source\n and eq(set(self.value_definitions), set(other.value_definitions))\n and self.is_computed == other.is_computed\n )" }, { "identifier": "RawDataSetSource", "path": "dispel/data/raw.py", "snippet": "class RawDataSetSource:\n \"\"\"The source of a raw data set.\"\"\"\n\n #: The manufacturer producing the raw data set source\n manufacturer: str" }, { "identifier": "RawDataValueDefinition", "path": "dispel/data/raw.py", "snippet": "class RawDataValueDefinition(ValueDefinition):\n \"\"\"The definition of raw data set values.\n\n Attributes\n ----------\n is_index\n ``True`` if the values are part of the raw data set index. 
Otherwise, ``False``.\n \"\"\"\n\n def __init__(\n self,\n id_: str,\n name: str,\n unit: Optional[str] = None,\n description: Optional[str] = None,\n data_type: Optional[str] = None,\n precision: Optional[int] = None,\n is_index: bool = False,\n ):\n super().__init__(\n id_=id_,\n name=name,\n unit=unit,\n description=description,\n data_type=data_type,\n precision=precision,\n )\n self.is_index = is_index" }, { "identifier": "ProcessResultType", "path": "dispel/processing/core.py", "snippet": "class ProcessingError(Exception):\nclass StopProcessingError(ProcessingError):\nclass FlagError(ProcessingError):\nclass InvalidDataError(ProcessingError):\nclass ProcessingResultBase:\nclass ProcessingResult(ProcessingResultBase):\nclass ErrorHandling(Enum):\nclass ProcessingControlResult(ProcessingResultBase):\nclass Parameter(Generic[ParameterType]):\nclass ProcessingStep:\nclass CoreProcessingStepGroup(ProcessingStep):\nclass _ChainedProcesses(CoreProcessingStepGroup):\nclass FlagReadingStep(FlagStepMixin, ProcessingStep):\n def __init__(self, message: str, step: \"ProcessingStep\"):\n def __init__(self, flag: Flag, step: \"ProcessingStep\"):\n def get_kwargs(self) -> Dict[str, Any]:\n def get_kwargs(self) -> Dict[str, Any]:\n def get_sources(self) -> Iterable[SourcesType]:\n def should_raise(self) -> bool:\n def __bool__(self) -> bool:\n def from_bool(cls, stop_processing: bool) -> \"ErrorHandling\":\n def __post_init__(self):\n def get_targets(self) -> Iterable[EntityType]:\n def from_assertion_error(\n cls,\n step: \"ProcessingStep\",\n error: AssertionError,\n level: Optional[Level] = None,\n ):\n def from_flag(\n cls,\n flag: Flag,\n step: \"ProcessingStep\",\n targets: Iterable[EntityType],\n level: Optional[Level] = None,\n ):\n def __new__(cls, id_: str, *_args, **_kwargs):\n def __init__(\n self,\n id_: str,\n default_value: Optional[ParameterType] = None,\n validator: Optional[Callable[[Any], None]] = None,\n description: Optional[str] = None,\n ):\n def id(self):\n def value(self) -> ParameterType:\n def value(self, value: ParameterType):\n def has_parameter(cls, full_id: str) -> bool:\n def set_value(cls, full_id: str, value: Any):\n def __init__(self):\n def process(self, reading: Reading, **kwargs) -> ProcessResultType:\n def assert_valid_reading(self, reading: Reading, **kwargs):\n def flag_reading(self, reading: Reading, **kwargs) -> Generator[Flag, None, None]:\n def get_reading_flag_targets(\n self, reading: Reading, **kwargs\n ) -> Iterable[EntityType]:\n def process_reading(self, reading: Reading, **kwargs) -> ProcessResultType:\n def set_previous(self, step: \"ProcessingStep\"):\n def set_next(self, step: \"ProcessingStep\"):\n def chain(self, successor: \"ProcessingStep\") -> \"ProcessingStep\":\n def __and__(self, other):\n def get_parameters(self) -> List[Tuple[str, Parameter]]:\n def __new__(cls, *args, **kwargs):\n def __init__(self, steps: Optional[List[ProcessingStep]] = None, **kwargs):\n def set_kwargs(self, **kwargs):\n def get_kwargs(self) -> Dict[str, Any]:\n def set_steps(self, steps: List[ProcessingStep]):\n def get_steps(self) -> List[ProcessingStep]:\n def process_reading(self, reading: Reading, **kwargs) -> ProcessResultType:\n def chain(self, successor: \"ProcessingStep\") -> \"ProcessingStep\":\n def __init__(\n self,\n task_name: Optional[Union[AV, str]] = None,\n flag_name: Optional[Union[AV, str]] = None,\n flag_type: Optional[Union[FlagType, str]] = None,\n flag_severity: Optional[Union[FlagSeverity, str]] = None,\n reason: Optional[Union[AV, str]] = 
None,\n stop_processing: bool = False,\n flagging_function: Optional[Callable[..., bool]] = None,\n ):\n def process_reading(self, reading: Reading, **kwargs) -> ProcessResultType:\n def get_reading_flag_targets(\n self, reading: Reading, **kwargs\n ) -> Iterable[EntityType]:\n def get_flag_targets(\n self, reading: Reading, level: Optional[Level] = None, **kwargs\n ) -> Iterable[EntityType]:\n def flag_reading(self, reading: Reading, **kwargs) -> Generator[Flag, None, None]:\n RAISE = \"raise\"\n IGNORE = \"ignore\"" }, { "identifier": "DataSetProcessingStepProtocol", "path": "dispel/processing/data_set.py", "snippet": "class RawDataSetProcessingResult(LevelProcessingResult):\nclass StorageError(Enum):\nclass DataSetProcessingStepProtocol(metaclass=ABCMeta):\nclass DataSetProcessingStepMixin(\n TaskMixin,\n DataSetProcessingStepProtocol,\n LevelProcessingStepProtocol,\n metaclass=ABCMeta,\n):\nclass DataSetProcessingStep(\n DataSetProcessingStepMixin, LevelProcessingStep, metaclass=ABCMeta\n):\nclass MutateDataSetProcessingStepBase(DataSetProcessingStep, metaclass=ABCMeta):\nclass FlagDataSetStep(FlagStepMixin, DataSetProcessingStep, metaclass=ABCMeta):\n def __post_init__(self):\n def overwrite(self) -> bool:\n def concatenate(self) -> bool:\n def process_data_sets(\n self,\n data_sets: Sequence[pd.DataFrame],\n level: Level,\n reading: Reading,\n **kwargs,\n ) -> ProcessResultType:\n def get_data_set_ids(self) -> Iterable[str]:\n def get_raw_data_sets(self, level: Level) -> List[RawDataSet]:\n def get_data_frames(self, level: Level) -> List[pd.DataFrame]:\n def assert_valid_data_sets(\n self,\n data_sets: Sequence[pd.DataFrame],\n level: Level,\n reading: Reading,\n **kwargs,\n ):\n def flag_data_sets(\n self,\n data_sets: Sequence[pd.DataFrame],\n level: Level,\n reading: Reading,\n **kwargs,\n ) -> Generator[Flag, None, None]:\n def get_data_sets_flag_targets(\n self,\n data_sets: Sequence[pd.DataFrame],\n level: Level,\n reading: Reading,\n **kwargs,\n ) -> Iterable[EntityType]:\n def __init__(self, *args, **kwargs):\n def get_data_set_ids(self) -> Iterable[str]:\n def get_raw_data_sets(self, level: Level) -> List[RawDataSet]:\n def get_data_frames(self, level: Level) -> List[pd.DataFrame]:\n def assert_valid_level(self, level: Level, reading: Reading, **kwargs):\n def __init__(\n self,\n data_set_ids: Optional[Union[str, Iterable[str]]] = None,\n level_filter: Optional[LevelFilterType] = None,\n ):\n def process_level(\n self, level: Level, reading: Reading, **kwargs\n ) -> ProcessResultType:\n def assert_valid_data_sets(\n self,\n data_sets: Sequence[pd.DataFrame],\n level: Level,\n reading: Reading,\n **kwargs,\n ):\n def flag_data_sets(\n self,\n data_sets: Sequence[pd.DataFrame],\n level: Level,\n reading: Reading,\n **kwargs,\n ) -> Generator[Flag, None, None]:\n def process_data_sets(\n self,\n data_sets: Sequence[pd.DataFrame],\n level: Level,\n reading: Reading,\n **kwargs,\n ) -> ProcessResultType:\ndef transformation(_func=None, **kwargs):\n def wrapper(func):\ndef decorated_processing_function(\n func: Callable[..., Any],\n data_sets: Sequence[pd.DataFrame],\n reading: Reading,\n level: Level,\n **kwargs,\n) -> Any:\n def __init__(\n self,\n data_set_ids: Optional[Union[str, Iterable[str]]] = None,\n transform_function: Optional[Callable[..., Any]] = None,\n level_filter: Optional[LevelFilterType] = None,\n ):\n def get_transform_function(self) -> Optional[Callable[..., Any]]:\n def get_transform_functions(self) -> TransformationFunctionGeneratorType:\n def wrap_result(\n 
self, res: Any, level: Level, reading: Reading, **kwargs: Any\n ) -> WrapResultGeneratorType:\n def process_data_sets(\n self,\n data_sets: Sequence[pd.DataFrame],\n level: Level,\n reading: Reading,\n **kwargs,\n ) -> ProcessResultType:\n def __init__(\n self,\n data_set_ids: Optional[Union[str, Iterable[str]]] = None,\n level_filter: Optional[LevelFilterType] = None,\n task_name: Optional[Union[AV, str]] = None,\n flag_name: Optional[Union[AV, str]] = None,\n flag_type: Optional[Union[FlagType, str]] = None,\n flag_severity: Optional[Union[FlagSeverity, str]] = None,\n reason: Optional[Union[AV, str]] = None,\n stop_processing: bool = False,\n flagging_function: Optional[Callable[..., bool]] = None,\n target_ids: Optional[Union[Iterable[str], str]] = None,\n ):\n def get_target_ids(self) -> Iterable[str]:\n def process_data_sets(\n self,\n data_sets: Sequence[pd.DataFrame],\n level: Level,\n reading: Reading,\n **kwargs,\n ) -> ProcessResultType:\n def get_data_sets_flag_targets(\n self,\n data_sets: Sequence[pd.DataFrame],\n level: Level,\n reading: Reading,\n **kwargs,\n ) -> Iterable[EntityType]:\n def get_flag_targets(\n self, reading: Reading, level: Optional[Level] = None, **kwargs\n ) -> Iterable[EntityType]:\n def flag_data_sets(\n self,\n data_sets: Sequence[pd.DataFrame],\n level: Level,\n reading: Reading,\n **kwargs,\n ) -> Generator[Flag, None, None]:\n RAISE = \"raise\"\n IGNORE = \"ignore\"\n OVERWRITE = \"overwrite\"\n CONCATENATE = \"concatenate\"" }, { "identifier": "ExtractStep", "path": "dispel/processing/extract.py", "snippet": "class ExtractStep(\n MeasureDefinitionMixin, TransformStepChainMixIn, MutateDataSetProcessingStepBase\n):\n r\"\"\"A measure extraction processing step.\n\n This class provides a convenient way to extract a measure from one or more data sets\n by specifying their id, their level_ids or level filter, a transformation function\n and a measure value definition.\n\n Parameters\n ----------\n data_set_ids\n An optional list of data set ids to be used for the transformation. See\n :class:`~dispel.processing.data_set.DataSetProcessingStepMixin`.\n transform_function\n An optional function to be applied to the data sets. See\n :class:`~dispel.processing.data_set.MutateDataSetProcessingStepBase`.\n definition\n An optional value definition or prototype. See\n :class:`MeasureDefinitionMixin`.\n level_filter\n An optional filter to limit the levels being processed. See\n :class:`~dispel.processing.level.LevelProcessingStep`.\n yield_if_nan\n If ``True``, yield null values as measure values. Otherwise, processing\n will not return a measure value in case of a null result for the extraction.\n\n Examples\n --------\n Assuming we wanted to compute the maximum value of a raw data set we can create the\n following step\n\n >>> from dispel.data.values import ValueDefinition\n >>> from dispel.processing.extract import ExtractStep\n >>> step = ExtractStep(\n ... 'data-set-id',\n ... lambda data: data.max(axis=0),\n ... ValueDefinition('maximum','Maximum value')\n ... )\n\n A common approach is to define a processing step for re-use and leveraging the\n ``@transformation`` decorator to specify the transformation function:\n\n >>> import pandas as pd\n >>> from dispel.data.values import ValueDefinition\n >>> from dispel.processing.extract import ExtractStep\n >>> from dispel.processing.data_set import transformation\n >>> class MyExtractStep(ExtractStep):\n ... data_set_ids = 'data-set-id'\n ... definition = ValueDefinition('maximum','Maximum value')\n ...\n ... 
@transformation\n ... def _max(self, data: pd.DataFrame) -> float:\n ... return data.max(axis=0)\n\n Often one wants to extract multiple measures from one data set. This can be achieved\n by using prototypes and optional named arguments with ``@transformation``:\n\n >>> import pandas as pd\n >>> from dispel.data.values import ValueDefinitionPrototype\n >>> from dispel.processing.extract import ExtractStep\n >>> from dispel.processing.data_set import transformation\n >>> class MyExtractStep(ExtractStep):\n ... data_set_ids = 'data-set-id'\n ... definition = ValueDefinitionPrototype(\n ... id_='id-{agg_abbr}',\n ... name='{agg} value'\n ... )\n ...\n ... @transformation(agg='Maximum', agg_abbr='max')\n ... def _max(self, data: pd.DataFrame) -> float:\n ... return data.max(axis=0)\n ...\n ... @transformation(agg='Minimum', agg_abbr='min')\n ... def _min(self, data: pd.DataFrame) -> float:\n ... return data.min(axis=0)\n\n \"\"\"\n\n yield_if_nan: bool = False\n\n def __init__(\n self,\n data_set_ids: Optional[Union[str, Iterable[str]]] = None,\n transform_function: Optional[Callable[..., Any]] = None,\n definition: Optional[Union[ValueDefinition, ValueDefinitionPrototype]] = None,\n level_filter: Optional[LevelFilterType] = None,\n yield_if_nan: Optional[bool] = None,\n ):\n super().__init__(\n definition=definition,\n data_set_ids=data_set_ids,\n transform_function=transform_function,\n level_filter=level_filter,\n )\n self.yield_if_nan = yield_if_nan or self.yield_if_nan\n\n def wrap_result(\n self, res: Any, level: Level, reading: Reading, **kwargs: Any\n ) -> WrapResultGeneratorType:\n \"\"\"Wrap the result from the processing function into a class.\n\n Parameters\n ----------\n res\n Any result returned by the extraction step. If res is a\n :class:`~dispel.data.flags.WrappedResult`, the flag contained\n in the object will be automatically added to the\n :class:`~dispel.data.measures.MeasureValue`, hence the flagged wrapped\n results will always translate into flagged\n :class:`~dispel.data.measures.MeasureValue`.\n level\n The current level\n reading\n The current reading\n kwargs\n Additional kwargs\n\n Yields\n ------\n LevelProcessingResult\n The processing result\n \"\"\"\n try:\n if len(res) == 0:\n res = math.nan\n warnings.warn(\"Extract step returned an iterable!\", UserWarning)\n except TypeError:\n pass\n if is_wrapped := isinstance(res, WrappedResult):\n measure_value = res.measure_value\n else:\n measure_value = res\n\n if not (is_nan := math.isnan(measure_value)) or (is_nan and self.yield_if_nan):\n value = self.get_value(measure_value, **kwargs)\n # If result is wrapped, add the flag to the measure value\n if is_wrapped:\n value.add_flags(res, ignore_duplicates=True)\n\n yield LevelProcessingResult(\n step=self,\n sources=self.get_raw_data_sets(level),\n result=value,\n level=level,\n )" }, { "identifier": "LevelProcessingResult", "path": "dispel/processing/level.py", "snippet": "class LevelProcessingResult(ProcessingResult, LevelProcessingResultBase):\n \"\"\"The processing result of a level processing step.\"\"\"" }, { "identifier": "TransformStepChainMixIn", "path": "dispel/processing/transform.py", "snippet": "class TransformStepChainMixIn(DataSetProcessingStepProtocol, metaclass=ABCMeta):\n \"\"\"A mixin class that allows to chain transformation steps.\n\n The basic idea is to leverage the new data set ids from the previous transform step\n as the required data set ids for the current step. 
This avoids having to define the\n `data_set_ids` attribute.\n \"\"\"\n\n def get_data_set_ids(self) -> Iterable[str]:\n \"\"\"Get the data set ids to be processed.\n\n This uses the new data set ids from a previous transform step if set. Otherwise,\n falls back to the default behavior of returning the set data set ids from the\n constructor or class variable.\n\n Returns\n -------\n Iterable[str]\n An iterable of data set ids.\n \"\"\"\n assert isinstance(\n self, ProcessingStep\n ), \"TransformStepChainMixIn must inherit from ProcessingStep\"\n # pylint: disable=no-member\n if isinstance(self.predecessor, TransformStep):\n return [self.predecessor.get_new_data_set_id()]\n # pylint: enable=no-member\n\n return super().get_data_set_ids() # type: ignore[safe-super]" } ]
from abc import ABC, ABCMeta from dataclasses import dataclass from typing import Any, Iterable, List, Optional, Sequence, Tuple, Union from dispel.data.core import Reading from dispel.data.epochs import Epoch, EpochDefinition from dispel.data.levels import Level, LevelEpoch, LevelEpochMeasureValue from dispel.data.measures import MeasureValue from dispel.data.raw import ( RawDataSet, RawDataSetDefinition, RawDataSetSource, RawDataValueDefinition, ) from dispel.processing.core import ProcessResultType from dispel.processing.data_set import ( DataSetProcessingStepProtocol, MutateDataSetProcessingStepBase, RawDataSetProcessingResult, StorageError, WrapResultGeneratorType, ) from dispel.processing.extract import ExtractStep from dispel.processing.level import LevelProcessingResult from dispel.processing.transform import TransformStepChainMixIn import pandas as pd
12044
"""Epoch-specific processing steps.""" class LevelEpochDefinitionMixIn: """A mixin-class for processing steps producing epoch measure sets. Parameters ---------- definition An optional epoch definition. If no epoch definition is provided, the :data:`definition` class variable will be used. Alternatively, one can overwrite :meth:`get_definition` to provide the definition. Attributes ---------- definition The epoch definition. This will be used in :func:`get_definition` by default. You can overwrite the function to implement custom logic. """ definition: Optional[EpochDefinition] = None def __init__(self, *args, **kwargs): definition = kwargs.pop("definition", None) self.definition = definition or self.definition super().__init__(*args, **kwargs) def get_definition(self, **_kwargs) -> EpochDefinition: """Get the measure definition. Other Parameters ---------------- _kwargs Optional parameters that will be passed along to the creation of epoch definitions. This can be used to implement custom logic in the epoch definition that depends on processing arguments. Returns ------- EpochDefinition The definition of the epoch """ assert ( self.definition is not None ), "Definition must be set or get_definition must be overwritten." return self.definition class CreateLevelEpochStep(
"""Epoch-specific processing steps.""" class LevelEpochDefinitionMixIn: """A mixin-class for processing steps producing epoch measure sets. Parameters ---------- definition An optional epoch definition. If no epoch definition is provided, the :data:`definition` class variable will be used. Alternatively, one can overwrite :meth:`get_definition` to provide the definition. Attributes ---------- definition The epoch definition. This will be used in :func:`get_definition` by default. You can overwrite the function to implement custom logic. """ definition: Optional[EpochDefinition] = None def __init__(self, *args, **kwargs): definition = kwargs.pop("definition", None) self.definition = definition or self.definition super().__init__(*args, **kwargs) def get_definition(self, **_kwargs) -> EpochDefinition: """Get the measure definition. Other Parameters ---------------- _kwargs Optional parameters that will be passed along to the creation of epoch definitions. This can be used to implement custom logic in the epoch definition that depends on processing arguments. Returns ------- EpochDefinition The definition of the epoch """ assert ( self.definition is not None ), "Definition must be set or get_definition must be overwritten." return self.definition class CreateLevelEpochStep(
LevelEpochDefinitionMixIn, TransformStepChainMixIn, MutateDataSetProcessingStepBase
12
2023-11-14 10:06:46+00:00
16k
believethehype/nostrdvm
main.py
[ { "identifier": "Bot", "path": "nostr_dvm/bot.py", "snippet": "class Bot:\n job_list: list\n\n # This is a simple list just to keep track which events we created and manage, so we don't pay for other requests\n def __init__(self, dvm_config, admin_config=None):\n self.NAME = \"Bot\"\n dvm_config.DB = \"db/\" + self.NAME + \".db\"\n self.dvm_config = dvm_config\n nip89config = NIP89Config()\n nip89config.NAME = self.NAME\n self.dvm_config.NIP89 = nip89config\n self.admin_config = admin_config\n self.keys = Keys.from_sk_str(dvm_config.PRIVATE_KEY)\n wait_for_send = True\n skip_disconnected_relays = True\n opts = (Options().wait_for_send(wait_for_send).send_timeout(timedelta(seconds=self.dvm_config.RELAY_TIMEOUT))\n .skip_disconnected_relays(skip_disconnected_relays))\n signer = ClientSigner.keys(self.keys)\n self.client = Client.with_opts(signer, opts)\n\n pk = self.keys.public_key()\n\n self.job_list = []\n\n print(\"Nostr BOT public key: \" + str(pk.to_bech32()) + \" Hex: \" + str(pk.to_hex()) + \" Name: \" + self.NAME +\n \" Supported DVM tasks: \" +\n ', '.join(p.NAME + \":\" + p.TASK for p in self.dvm_config.SUPPORTED_DVMS) + \"\\n\")\n\n for relay in self.dvm_config.RELAY_LIST:\n self.client.add_relay(relay)\n self.client.connect()\n\n zap_filter = Filter().pubkey(pk).kinds([EventDefinitions.KIND_ZAP]).since(Timestamp.now())\n dm_filter = Filter().pubkey(pk).kinds([EventDefinitions.KIND_DM]).since(Timestamp.now())\n kinds = [EventDefinitions.KIND_NIP90_GENERIC, EventDefinitions.KIND_FEEDBACK]\n for dvm in self.dvm_config.SUPPORTED_DVMS:\n if dvm.KIND not in kinds:\n kinds.append(dvm.KIND + 1000)\n dvm_filter = (Filter().kinds(kinds).since(Timestamp.now()))\n\n self.client.subscribe([zap_filter, dm_filter, dvm_filter])\n\n create_sql_table(self.dvm_config.DB)\n admin_make_database_updates(adminconfig=self.admin_config, dvmconfig=self.dvm_config, client=self.client)\n\n class NotificationHandler(HandleNotification):\n client = self.client\n dvm_config = self.dvm_config\n keys = self.keys\n\n def handle(self, relay_url, nostr_event):\n if (EventDefinitions.KIND_NIP90_EXTRACT_TEXT + 1000 <= nostr_event.kind()\n <= EventDefinitions.KIND_NIP90_GENERIC + 1000):\n handle_nip90_response_event(nostr_event)\n elif nostr_event.kind() == EventDefinitions.KIND_FEEDBACK:\n handle_nip90_feedback(nostr_event)\n elif nostr_event.kind() == EventDefinitions.KIND_DM:\n handle_dm(nostr_event)\n elif nostr_event.kind() == EventDefinitions.KIND_ZAP:\n handle_zap(nostr_event)\n\n def handle_msg(self, relay_url, msg):\n return\n\n def handle_dm(nostr_event):\n sender = nostr_event.pubkey().to_hex()\n if sender == self.keys.public_key().to_hex():\n return\n\n try:\n decrypted_text = nip04_decrypt(self.keys.secret_key(), nostr_event.pubkey(), nostr_event.content())\n user = get_or_add_user(db=self.dvm_config.DB, npub=sender, client=self.client, config=self.dvm_config)\n print(\"[\" + self.NAME + \"] Message from \" + user.name + \": \" + decrypted_text)\n\n # if user selects an index from the overview list...\n if decrypted_text[0].isdigit():\n split = decrypted_text.split(' ')\n index = int(split[0]) - 1\n # if user sends index info, e.g. 
1 info, we fetch the nip89 information and reply with it.\n if len(split) > 1 and split[1].lower() == \"info\":\n answer_nip89(nostr_event, index)\n # otherwise we probably have to do some work, so build an event from input and send it to the DVM\n else:\n task = self.dvm_config.SUPPORTED_DVMS[index].TASK\n print(\"[\" + self.NAME + \"] Request from \" + str(user.name) + \" (\" + str(user.nip05) +\n \", Balance: \" + str(user.balance) + \" Sats) Task: \" + str(task))\n\n if user.isblacklisted:\n # If users are blacklisted for some reason, tell them.\n answer_blacklisted(nostr_event)\n\n else:\n # Parse inputs to params\n tags = build_params(decrypted_text, nostr_event, index)\n p_tag = Tag.parse(['p', self.dvm_config.SUPPORTED_DVMS[index].PUBLIC_KEY])\n\n if self.dvm_config.SUPPORTED_DVMS[index].SUPPORTS_ENCRYPTION:\n tags_str = []\n for tag in tags:\n tags_str.append(tag.as_vec())\n params_as_str = json.dumps(tags_str)\n print(params_as_str)\n # and encrypt them\n encrypted_params = nip04_encrypt(self.keys.secret_key(),\n PublicKey.from_hex(\n self.dvm_config.SUPPORTED_DVMS[index].PUBLIC_KEY),\n params_as_str)\n # add encrypted and p tag on the outside\n encrypted_tag = Tag.parse(['encrypted'])\n # add the encrypted params to the content\n nip90request = (EventBuilder(self.dvm_config.SUPPORTED_DVMS[index].KIND,\n encrypted_params, [p_tag, encrypted_tag]).\n to_event(self.keys))\n else:\n tags.append(p_tag)\n\n nip90request = (EventBuilder(self.dvm_config.SUPPORTED_DVMS[index].KIND,\n \"\", tags).\n to_event(self.keys))\n\n # remember in the job_list that we have made an event, if anybody asks for payment,\n # we know we actually sent the request\n entry = {\"npub\": user.npub, \"event_id\": nip90request.id().to_hex(),\n \"dvm_key\": self.dvm_config.SUPPORTED_DVMS[index].PUBLIC_KEY, \"is_paid\": False}\n self.job_list.append(entry)\n\n # send the event to the DVM\n send_event(nip90request, client=self.client, dvm_config=self.dvm_config)\n # print(nip90request.as_json())\n\n\n\n elif decrypted_text.lower().startswith(\"balance\"):\n time.sleep(3.0)\n evt = EventBuilder.new_encrypted_direct_msg(self.keys, nostr_event.pubkey(),\n \"Your current balance is \" + str(\n user.balance) + \" Sats. Zap me to add to your balance. I will use your balance interact with the DVMs for you.\\n\"\n \"I support both public and private Zaps, as well as Zapplepay.\\n\"\n \"Alternativly you can add a #cashu token with \\\"-cashu cashuASomeToken\\\" to your command.\\n Make sure the token is worth the requested amount + \"\n \"mint fees (at least 3 sat).\\n Not all DVMs might accept Cashu tokens.\"\n , None).to_event(self.keys)\n send_event(evt, client=self.client, dvm_config=dvm_config)\n\n elif decrypted_text.startswith(\"cashuA\"):\n print(\"Received Cashu token:\" + decrypted_text)\n cashu_redeemed, cashu_message, total_amount, fees = redeem_cashu(decrypted_text, self.dvm_config,\n self.client)\n print(cashu_message)\n if cashu_message == \"success\":\n update_user_balance(self.dvm_config.DB, sender, total_amount, client=self.client,\n config=self.dvm_config)\n else:\n time.sleep(2.0)\n message = \"Error: \" + cashu_message + \". 
Token has not been redeemed.\"\n evt = EventBuilder.new_encrypted_direct_msg(self.keys, PublicKey.from_hex(sender), message,\n None).to_event(self.keys)\n send_event(evt, client=self.client, dvm_config=self.dvm_config)\n elif decrypted_text.lower().startswith(\"what's the second best\"):\n time.sleep(3.0)\n evt = EventBuilder.new_encrypted_direct_msg(self.keys, nostr_event.pubkey(),\n \"No, there is no second best.\\n\\nhttps://cdn.nostr.build/p/mYLv.mp4\",\n nostr_event.id()).to_event(self.keys)\n send_event(evt, client=self.client, dvm_config=self.dvm_config)\n\n else:\n # Build an overview of known DVMs and send it to the user\n answer_overview(nostr_event)\n\n except Exception as e:\n print(\"Error in bot \" + str(e))\n\n def handle_nip90_feedback(nostr_event):\n print(nostr_event.as_json())\n try:\n is_encrypted = False\n status = \"\"\n etag = \"\"\n ptag = \"\"\n content = nostr_event.content()\n for tag in nostr_event.tags():\n if tag.as_vec()[0] == \"status\":\n status = tag.as_vec()[1]\n if len(tag.as_vec()) > 2:\n content = tag.as_vec()[2]\n elif tag.as_vec()[0] == \"e\":\n etag = tag.as_vec()[1]\n elif tag.as_vec()[0] == \"p\":\n ptag = tag.as_vec()[1]\n elif tag.as_vec()[0] == \"encrypted\":\n is_encrypted = True\n\n if is_encrypted:\n if ptag == self.keys.public_key().to_hex():\n tags_str = nip04_decrypt(Keys.from_sk_str(dvm_config.PRIVATE_KEY).secret_key(),\n nostr_event.pubkey(), nostr_event.content())\n params = json.loads(tags_str)\n params.append(Tag.parse([\"p\", ptag]).as_vec())\n params.append(Tag.parse([\"encrypted\"]).as_vec())\n event_as_json = json.loads(nostr_event.as_json())\n event_as_json['tags'] = params\n event_as_json['content'] = \"\"\n nostr_event = Event.from_json(json.dumps(event_as_json))\n\n for tag in nostr_event.tags():\n if tag.as_vec()[0] == \"status\":\n status = tag.as_vec()[1]\n if len(tag.as_vec()) > 2:\n content = tag.as_vec()[2]\n elif tag.as_vec()[0] == \"e\":\n etag = tag.as_vec()[1]\n elif tag.as_vec()[0] == \"content\":\n content = tag.as_vec()[1]\n\n else:\n return\n\n if status == \"success\" or status == \"error\" or status == \"processing\" or status == \"partial\" and content != \"\":\n entry = next((x for x in self.job_list if x['event_id'] == etag), None)\n if entry is not None and entry['dvm_key'] == nostr_event.pubkey().to_hex():\n user = get_or_add_user(db=self.dvm_config.DB, npub=entry['npub'],\n client=self.client, config=self.dvm_config)\n time.sleep(2.0)\n reply_event = EventBuilder.new_encrypted_direct_msg(self.keys,\n PublicKey.from_hex(entry['npub']),\n content,\n None).to_event(self.keys)\n print(status + \": \" + content)\n print(\n \"[\" + self.NAME + \"] Received reaction from \" + nostr_event.pubkey().to_hex() + \" message to orignal sender \" + user.name)\n send_event(reply_event, client=self.client, dvm_config=dvm_config)\n\n elif status == \"payment-required\" or status == \"partial\":\n for tag in nostr_event.tags():\n if tag.as_vec()[0] == \"amount\":\n amount_msats = int(tag.as_vec()[1])\n amount = int(amount_msats / 1000)\n entry = next((x for x in self.job_list if x['event_id'] == etag), None)\n if entry is not None and entry['is_paid'] is False and entry[\n 'dvm_key'] == nostr_event.pubkey().to_hex():\n # if we get a bolt11, we pay and move on\n user = get_or_add_user(db=self.dvm_config.DB, npub=entry[\"npub\"],\n client=self.client, config=self.dvm_config)\n if user.balance >= amount:\n balance = max(user.balance - amount, 0)\n update_sql_table(db=self.dvm_config.DB, npub=user.npub, balance=balance,\n 
iswhitelisted=user.iswhitelisted, isblacklisted=user.isblacklisted,\n nip05=user.nip05, lud16=user.lud16, name=user.name,\n lastactive=Timestamp.now().as_secs())\n evt = EventBuilder.new_encrypted_direct_msg(self.keys,\n PublicKey.from_hex(entry[\"npub\"]),\n \"Paid \" + str(\n amount) + \" Sats from balance to DVM. New balance is \" +\n str(balance)\n + \" Sats.\\n\",\n None).to_event(self.keys)\n\n print(\n \"[\" + self.NAME + \"] Replying \" + user.name + \" with \\\"scheduled\\\" confirmation\")\n send_event(evt, client=self.client, dvm_config=dvm_config)\n else:\n print(\"Bot payment-required\")\n time.sleep(2.0)\n evt = EventBuilder.new_encrypted_direct_msg(self.keys,\n PublicKey.from_hex(entry[\"npub\"]),\n \"Current balance: \" + str(\n user.balance) + \" Sats. Balance of \" + str(\n amount) + \" Sats required. Please zap me with at least \" +\n str(int(amount - user.balance))\n + \" Sats, then try again.\",\n None).to_event(self.keys)\n send_event(evt, client=self.client, dvm_config=dvm_config)\n return\n\n if len(tag.as_vec()) > 2:\n bolt11 = tag.as_vec()[2]\n # else we create a zap\n else:\n user = get_or_add_user(db=self.dvm_config.DB, npub=nostr_event.pubkey().to_hex(),\n client=self.client, config=self.dvm_config)\n print(\"Paying: \" + user.name)\n bolt11 = zaprequest(user.lud16, amount, \"Zap\", nostr_event, self.keys,\n self.dvm_config,\n \"private\")\n if bolt11 == None:\n print(\"Receiver has no Lightning address\")\n return\n try:\n print(bolt11)\n payment_hash = pay_bolt11_ln_bits(bolt11, self.dvm_config)\n self.job_list[self.job_list.index(entry)]['is_paid'] = True\n print(\"[\" + self.NAME + \"] payment_hash: \" + payment_hash +\n \" Forwarding payment of \" + str(amount) + \" Sats to DVM\")\n except Exception as e:\n print(e)\n\n\n except Exception as e:\n print(e)\n\n def handle_nip90_response_event(nostr_event: Event):\n try:\n ptag = \"\"\n etag = \"\"\n is_encrypted = False\n for tag in nostr_event.tags():\n if tag.as_vec()[0] == \"e\":\n etag = tag.as_vec()[1]\n elif tag.as_vec()[0] == \"p\":\n ptag = tag.as_vec()[1]\n elif tag.as_vec()[0] == \"encrypted\":\n is_encrypted = True\n\n entry = next((x for x in self.job_list if x['event_id'] == etag), None)\n if entry is not None and entry[\n 'dvm_key'] == nostr_event.pubkey().to_hex():\n print(entry)\n user = get_or_add_user(db=self.dvm_config.DB, npub=entry['npub'],\n client=self.client, config=self.dvm_config)\n\n self.job_list.remove(entry)\n content = nostr_event.content()\n if is_encrypted:\n if ptag == self.keys.public_key().to_hex():\n content = nip04_decrypt(self.keys.secret_key(), nostr_event.pubkey(), content)\n else:\n return\n\n dvms = [x for x in self.dvm_config.SUPPORTED_DVMS if\n x.PUBLIC_KEY == nostr_event.pubkey().to_hex() and x.KIND == nostr_event.kind() - 1000]\n if len(dvms) > 0:\n dvm = dvms[0]\n if dvm.dvm_config.EXTERNAL_POST_PROCESS_TYPE != PostProcessFunctionType.NONE:\n if dvm.dvm_config.EXTERNAL_POST_PROCESS_TYPE == PostProcessFunctionType.LIST_TO_EVENTS:\n content = post_process_list_to_events(content)\n elif dvm.dvm_config.EXTERNAL_POST_PROCESS_TYPE == PostProcessFunctionType.LIST_TO_USERS:\n content = post_process_list_to_users(content)\n\n print(\"[\" + self.NAME + \"] Received results, message to orignal sender \" + user.name)\n time.sleep(1.0)\n reply_event = EventBuilder.new_encrypted_direct_msg(self.keys,\n PublicKey.from_hex(user.npub),\n content,\n None).to_event(self.keys)\n send_event(reply_event, client=self.client, dvm_config=dvm_config)\n\n except Exception as e:\n 
print(e)\n\n def handle_zap(zap_event):\n print(\"[\" + self.NAME + \"] Zap received\")\n try:\n invoice_amount, zapped_event, sender, message, anon = parse_zap_event_tags(zap_event,\n self.keys, self.NAME,\n self.client, self.dvm_config)\n\n etag = \"\"\n for tag in zap_event.tags():\n if tag.as_vec()[0] == \"e\":\n etag = tag.as_vec()[1]\n\n user = get_or_add_user(self.dvm_config.DB, sender, client=self.client, config=self.dvm_config)\n\n entry = next((x for x in self.job_list if x['event_id'] == etag), None)\n print(entry)\n # print(entry['dvm_key'])\n # print(str(zapped_event.pubkey().to_hex()))\n # print(str(zap_event.pubkey().to_hex()))\n print(sender)\n if entry is not None and entry['is_paid'] is True and entry['dvm_key'] == sender:\n # if we get a bolt11, we pay and move on\n user = get_or_add_user(db=self.dvm_config.DB, npub=entry[\"npub\"],\n client=self.client, config=self.dvm_config)\n\n sender = user.npub\n\n if zapped_event is not None:\n if not anon:\n print(\"[\" + self.NAME + \"] Note Zap received for Bot balance: \" + str(\n invoice_amount) + \" Sats from \" + str(\n user.name))\n update_user_balance(self.dvm_config.DB, sender, invoice_amount, client=self.client,\n config=self.dvm_config)\n\n # a regular note\n elif not anon:\n print(\"[\" + self.NAME + \"] Profile Zap received for Bot balance: \" + str(\n invoice_amount) + \" Sats from \" + str(\n user.name))\n update_user_balance(self.dvm_config.DB, sender, invoice_amount, client=self.client,\n config=self.dvm_config)\n\n except Exception as e:\n print(\"[\" + self.NAME + \"] Error during content decryption:\" + str(e))\n\n def answer_overview(nostr_event):\n message = \"DVMs that I support:\\n\\n\"\n index = 1\n for p in self.dvm_config.SUPPORTED_DVMS:\n if p.PER_UNIT_COST != 0 and p.PER_UNIT_COST is not None:\n message += (str(index) + \" \" + p.NAME + \" \" + p.TASK + \"\\n\\t\" + str(p.FIX_COST) +\n \" Sats + \" + str(p.PER_UNIT_COST) + \" Sats per Second\\n\\n\")\n else:\n message += (str(index) + \" \" + p.NAME + \" \" + p.TASK + \"\\n\\t\" + str(p.FIX_COST) +\n \" Sats\\n\\n\")\n index += 1\n\n time.sleep(3.0)\n evt = EventBuilder.new_encrypted_direct_msg(self.keys, nostr_event.pubkey(),\n message + \"\\nSelect an Index and provide an input (\"\n \"e.g. \\\"2 A purple ostrich\\\")\\nType \\\"index info\\\" to learn \"\n \"more about each DVM. (e.g. \\\"2 info\\\")\\n\\n\"\n \"Type \\\"balance\\\" to see your current balance\",\n nostr_event.id()).to_event(self.keys)\n\n send_event(evt, client=self.client, dvm_config=dvm_config)\n\n def answer_blacklisted(nostr_event):\n # For some reason an admin might blacklist npubs, e.g. 
for abusing the service\n evt = EventBuilder.new_encrypted_direct_msg(self.keys, nostr_event.pubkey(),\n \"Your are currently blocked from all \"\n \"services.\", None).to_event(self.keys)\n send_event(evt, client=self.client, dvm_config=dvm_config)\n\n def answer_nip89(nostr_event, index):\n info = print_dvm_info(self.client, index)\n time.sleep(2.0)\n if info is not None:\n evt = EventBuilder.new_encrypted_direct_msg(self.keys, nostr_event.pubkey(),\n info, None).to_event(self.keys)\n else:\n evt = EventBuilder.new_encrypted_direct_msg(self.keys, nostr_event.pubkey(),\n \"No NIP89 Info found for \" +\n self.dvm_config.SUPPORTED_DVMS[index].NAME,\n None).to_event(self.keys)\n\n send_event(evt, client=self.client, dvm_config=dvm_config)\n\n def build_params(decrypted_text, nostr_event, index):\n tags = []\n splitzero = decrypted_text.split(' -')\n split = splitzero[0].split(' ')\n # If only a command without parameters is sent, we assume no input is required, and that means the dvm might take in the user as input (e.g. for content discovery)\n if len(split) == 1:\n remaining_text = decrypted_text.replace(split[0], \"\")\n params = remaining_text.split(\" -\")\n tag = Tag.parse([\"param\", \"user\", nostr_event.pubkey().to_hex()])\n tags.append(tag)\n for i in params:\n print(i)\n if i != \" \":\n try:\n split = i.split(\" \")\n if len(split) > 1:\n param = str(split[0])\n print(str(param))\n value = str(split[1])\n print(str(value))\n if param == \"cashu\":\n tag = Tag.parse([param, value])\n else:\n if param == \"user\":\n if value.startswith(\"@\") or value.startswith(\"nostr:\") or value.startswith(\n \"npub\"):\n value = PublicKey.from_bech32(\n value.replace(\"@\", \"\").replace(\"nostr:\", \"\")).to_hex()\n tag = Tag.parse([\"param\", param, value])\n tags.append(tag)\n except Exception as e:\n print(e)\n print(\"Couldn't add \" + str(i))\n output = Tag.parse([\"output\", \"text/plain\"])\n tags.append(output)\n relay_list = [\"relays\"]\n for relay in self.dvm_config.RELAY_LIST:\n relay_list.append(relay)\n relays = Tag.parse(relay_list)\n tags.append(relays)\n\n return tags\n\n tags = []\n command = decrypted_text.replace(split[0] + \" \", \"\")\n split = command.split(\" -\")\n input = split[0].rstrip()\n if input.startswith(\"http\"):\n temp = input.split(\" \")\n if len(temp) > 1:\n input_type = \"url\"\n i_tag1 = Tag.parse([\"i\", temp[0], input_type])\n tags.append(i_tag1)\n input_type = \"text\"\n i_tag2 = Tag.parse([\"i\", input.replace(temp[0], \"\").lstrip(), input_type])\n tags.append(i_tag2)\n else:\n input_type = \"url\"\n i_tag = Tag.parse([\"i\", input, input_type])\n tags.append(i_tag)\n elif (input.startswith(\"nevent\") or input.startswith(\"nostr:nevent\") or input.startswith(\"note\") or\n input.startswith(\"nostr:note\")):\n input_type = \"event\"\n if str(input).startswith('note'):\n event_id = EventId.from_bech32(input)\n elif str(input).startswith(\"nevent\"):\n event_id = Nip19Event.from_bech32(input).event_id()\n elif str(input).startswith('nostr:note'):\n event_id = EventId.from_nostr_uri(input)\n elif str(input).startswith(\"nostr:nevent\"):\n event_id = Nip19Event.from_nostr_uri(input).event_id()\n else:\n event_id = EventId.from_hex(input)\n i_tag = Tag.parse([\"i\", event_id.to_hex(), input_type])\n tags.append(i_tag)\n else:\n print(input)\n input_type = \"text\"\n i_tag = Tag.parse([\"i\", input, input_type])\n tags.append(i_tag)\n\n alt_tag = Tag.parse([\"alt\", self.dvm_config.SUPPORTED_DVMS[index].TASK])\n tags.append(alt_tag)\n relaylist = 
[\"relays\"]\n for relay in self.dvm_config.RELAY_LIST:\n relaylist.append(relay)\n relays_tag = Tag.parse(relaylist)\n tags.append(relays_tag)\n output_tag = Tag.parse([\"output\", \"text/plain\"])\n tags.append(output_tag)\n remaining_text = command.replace(input, \"\")\n print(remaining_text)\n\n params = remaining_text.rstrip().split(\" -\")\n\n for i in params:\n print(i)\n if i != \" \":\n try:\n split = i.split(\" \")\n if len(split) > 1:\n param = str(split[0])\n print(str(param))\n value = str(split[1])\n print(str(value))\n if param == \"cashu\":\n tag = Tag.parse([param, value])\n else:\n if param == \"user\":\n if value.startswith(\"@\") or value.startswith(\"nostr:\") or value.startswith(\"npub\"):\n value = PublicKey.from_bech32(\n value.replace(\"@\", \"\").replace(\"nostr:\", \"\")).to_hex()\n tag = Tag.parse([\"param\", param, value])\n tags.append(tag)\n print(\"Added params: \" + str(tag.as_vec()))\n except Exception as e:\n print(e)\n print(\"Couldn't add \" + str(i))\n\n return tags\n\n def print_dvm_info(client, index):\n pubkey = self.dvm_config.SUPPORTED_DVMS[index].dvm_config.PUBLIC_KEY\n kind = self.dvm_config.SUPPORTED_DVMS[index].KIND\n nip89content_str = nip89_fetch_events_pubkey(client, pubkey, kind)\n print(nip89content_str)\n if nip89content_str is not None:\n nip89content = json.loads(nip89content_str)\n info = \"\"\n cashu_accepted = False\n encryption_supported = False\n\n if nip89content.get(\"name\"):\n info += \"Name: \" + nip89content.get(\"name\") + \"\\n\"\n if nip89content.get(\"image\"):\n info += nip89content.get(\"image\") + \"\\n\"\n if nip89content.get(\"about\"):\n info += \"About:\\n\" + nip89content.get(\"about\") + \"\\n\\n\"\n if nip89content.get(\"cashuAccepted\"):\n cashu_accepted = str(nip89content.get(\"cashuAccepted\"))\n if nip89content.get(\"encryptionSupported\"):\n encryption_supported = str(nip89content.get(\"encryptionSupported\"))\n\n info += \"Encryption supported: \" + str(encryption_supported) + \"\\n\"\n info += \"Cashu accepted: \" + str(cashu_accepted) + \"\\n\\n\"\n if nip89content.get(\"nip90Params\"):\n params = nip89content[\"nip90Params\"]\n info += \"\\nParameters:\\n\"\n for param in params:\n info += \"-\" + param + '\\n'\n info += \"Required: \" + str(params[param]['required']) + '\\n'\n info += \"Possible Values: \" + json.dumps(params[param]['values']) + '\\n\\n'\n return info\n\n return None\n\n self.client.handle_notifications(NotificationHandler())\n\n try:\n while True:\n time.sleep(1.0)\n except KeyboardInterrupt:\n print('Stay weird!')\n os.kill(os.getpid(), signal.SIGTERM)" }, { "identifier": "videogeneration_replicate_svd", "path": "nostr_dvm/tasks/videogeneration_replicate_svd.py", "snippet": "class VideoGenerationReplicateSVD(DVMTaskInterface):\n KIND: int = EventDefinitions.KIND_NIP90_GENERATE_VIDEO\n TASK: str = \"image-to-video\"\n FIX_COST: float = 120\n def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,\n admin_config: AdminConfig = None, options=None):\n def is_input_supported(self, tags, client=None, dvm_config=None):\n def create_request_from_nostr_event(self, event, client=None, dvm_config=None):\n def process(self, request_form):\ndef build_example(name, identifier, admin_config):" }, { "identifier": "imagegeneration_replicate_sdxl", "path": "nostr_dvm/tasks/imagegeneration_replicate_sdxl.py", "snippet": "class ImageGenerationReplicateSDXL(DVMTaskInterface):\n KIND: int = EventDefinitions.KIND_NIP90_GENERATE_IMAGE\n TASK: str = \"text-to-image\"\n FIX_COST: float = 
120\n def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,\n admin_config: AdminConfig = None, options=None):\n def is_input_supported(self, tags, client=None, dvm_config=None):\n def create_request_from_nostr_event(self, event, client=None, dvm_config=None):\n def process(self, request_form):\ndef build_example(name, identifier, admin_config):" }, { "identifier": "textgeneration_llmlite", "path": "nostr_dvm/tasks/textgeneration_llmlite.py", "snippet": "class TextGenerationLLMLite(DVMTaskInterface):\n KIND: int = EventDefinitions.KIND_NIP90_GENERATE_TEXT\n TASK: str = \"text-to-text\"\n FIX_COST: float = 0\n def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,\n admin_config: AdminConfig = None, options=None):\n def is_input_supported(self, tags, client=None, dvm_config=None):\n def create_request_from_nostr_event(self, event, client=None, dvm_config=None):\n def process(self, request_form):\ndef build_example(name, identifier, admin_config):" }, { "identifier": "trending_notes_nostrband", "path": "nostr_dvm/tasks/trending_notes_nostrband.py", "snippet": "class TrendingNotesNostrBand(DVMTaskInterface):\n KIND: int = EventDefinitions.KIND_NIP90_CONTENT_DISCOVERY\n TASK: str = \"trending-content\"\n FIX_COST: float = 0\n def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,\n admin_config: AdminConfig = None, options=None):\n def is_input_supported(self, tags, client=None, dvm_config=None):\n def create_request_from_nostr_event(self, event, client=None, dvm_config=None):\n def process(self, request_form):\n def post_process(self, result, event):\ndef build_example(name, identifier, admin_config):" }, { "identifier": "discovery_inactive_follows", "path": "nostr_dvm/tasks/discovery_inactive_follows.py", "snippet": "class DiscoverInactiveFollows(DVMTaskInterface):\n KIND: int = EventDefinitions.KIND_NIP90_PEOPLE_DISCOVERY\n TASK: str = \"inactive-follows\"\n FIX_COST: float = 50\n def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,\n admin_config: AdminConfig = None, options=None):\n def is_input_supported(self, tags, client=None, dvm_config=None):\n def create_request_from_nostr_event(self, event, client=None, dvm_config=None):\n def process(self, request_form):\n def scanList(users, instance, i, st, notactivesince):\n def post_process(self, result, event):\ndef build_example(name, identifier, admin_config):" }, { "identifier": "translation_google", "path": "nostr_dvm/tasks/translation_google.py", "snippet": "class TranslationGoogle(DVMTaskInterface):\n KIND: int = EventDefinitions.KIND_NIP90_TRANSLATE_TEXT\n TASK: str = \"translation\"\n FIX_COST: float = 0\n def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,\n admin_config: AdminConfig = None, options=None):\n def is_input_supported(self, tags, client=None, dvm_config=None):\n def create_request_from_nostr_event(self, event, client=None, dvm_config=None):\n def process(self, request_form):\ndef build_example(name, identifier, admin_config):" }, { "identifier": "textextraction_pdf", "path": "nostr_dvm/tasks/textextraction_pdf.py", "snippet": "class TextExtractionPDF(DVMTaskInterface):\n KIND: int = EventDefinitions.KIND_NIP90_EXTRACT_TEXT\n TASK: str = \"pdf-to-text\"\n FIX_COST: float = 0\n def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,\n admin_config: AdminConfig = None, options=None):\n def is_input_supported(self, tags, client=None, dvm_config=None):\n def create_request_from_nostr_event(self, event, 
client=None, dvm_config=None):\n def process(self, request_form):\ndef build_example(name, identifier, admin_config):" }, { "identifier": "translation_libretranslate", "path": "nostr_dvm/tasks/translation_libretranslate.py", "snippet": "class TranslationLibre(DVMTaskInterface):\n KIND: int = EventDefinitions.KIND_NIP90_TRANSLATE_TEXT\n TASK: str = \"translation\"\n FIX_COST: float = 0\n def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,\n admin_config: AdminConfig = None, options=None, task=None):\n def is_input_supported(self, tags, client=None, dvm_config=None):\n def create_request_from_nostr_event(self, event, client=None, dvm_config=None):\n def process(self, request_form):\ndef build_example(name, identifier, admin_config):" }, { "identifier": "textextraction_google", "path": "nostr_dvm/tasks/textextraction_google.py", "snippet": "class SpeechToTextGoogle(DVMTaskInterface):\n KIND: int = EventDefinitions.KIND_NIP90_EXTRACT_TEXT\n TASK: str = \"speech-to-text\"\n FIX_COST: float = 10\n PER_UNIT_COST: float = 0.1\n def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,\n admin_config: AdminConfig = None, options=None):\n def is_input_supported(self, tags, client=None, dvm_config=None):\n def create_request_from_nostr_event(self, event, client=None, dvm_config=None):\n def process(self, request_form):\ndef build_example(name, identifier, admin_config):" }, { "identifier": "convert_media", "path": "nostr_dvm/tasks/convert_media.py", "snippet": "class MediaConverter(DVMTaskInterface):\n KIND = EventDefinitions.KIND_NIP90_CONVERT_VIDEO\n TASK = \"convert\"\n FIX_COST = 20\n PER_UNIT_COST = 0.1\n def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,\n admin_config: AdminConfig = None, options=None):\n def is_input_supported(self, tags, client=None, dvm_config=None):\n def create_request_from_nostr_event(self, event, client=None, dvm_config=None):\n def process(self, request_form):\ndef build_example(name, identifier, admin_config):" }, { "identifier": "imagegeneration_openai_dalle", "path": "nostr_dvm/tasks/imagegeneration_openai_dalle.py", "snippet": "class ImageGenerationDALLE(DVMTaskInterface):\n KIND: int = EventDefinitions.KIND_NIP90_GENERATE_IMAGE\n TASK: str = \"text-to-image\"\n FIX_COST: float = 120\n def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,\n admin_config: AdminConfig = None, options=None):\n def is_input_supported(self, tags, client=None, dvm_config=None):\n def create_request_from_nostr_event(self, event, client=None, dvm_config=None):\n def process(self, request_form):\ndef build_example(name, identifier, admin_config):" }, { "identifier": "texttospeech", "path": "nostr_dvm/tasks/texttospeech.py", "snippet": "class TextToSpeech(DVMTaskInterface):\n KIND: int = EventDefinitions.KIND_NIP90_TEXT_TO_SPEECH\n TASK: str = \"text-to-speech\"\n FIX_COST: float = 50\n PER_UNIT_COST = 0.5\n def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,\n admin_config: AdminConfig = None, options=None):\n def is_input_supported(self, tags, client=None, dvm_config=None):\n def create_request_from_nostr_event(self, event, client=None, dvm_config=None):\n def process(self, request_form):\ndef build_example(name, identifier, admin_config):" }, { "identifier": "imagegeneration_sd21_mlx", "path": "nostr_dvm/tasks/imagegeneration_sd21_mlx.py", "snippet": "class ImageGenerationMLX(DVMTaskInterface):\n KIND: int = EventDefinitions.KIND_NIP90_GENERATE_IMAGE\n TASK: str = \"text-to-image\"\n 
FIX_COST: float = 120\n B, H, W, C = x.shape\n def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,\n admin_config: AdminConfig = None, options=None):\n def is_input_supported(self, tags, client=None, dvm_config=None):\n def create_request_from_nostr_event(self, event, client=None, dvm_config=None):\n def process(self, request_form):\ndef build_example(name, identifier, admin_config):" }, { "identifier": "advanced_search", "path": "nostr_dvm/tasks/advanced_search.py", "snippet": "class AdvancedSearch(DVMTaskInterface):\n KIND: int = EventDefinitions.KIND_NIP90_CONTENT_SEARCH\n TASK: str = \"search-content\"\n FIX_COST: float = 0\n def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,\n admin_config: AdminConfig = None, options=None):\n def is_input_supported(self, tags, client=None, dvm_config=None):\n def create_request_from_nostr_event(self, event, client=None, dvm_config=None):\n def process(self, request_form):\n def post_process(self, result, event):\ndef build_example(name, identifier, admin_config):" }, { "identifier": "textgeneration_huggingchat", "path": "nostr_dvm/tasks/textgeneration_huggingchat.py", "snippet": "class TextGenerationHuggingChat(DVMTaskInterface):\n KIND: int = EventDefinitions.KIND_NIP90_GENERATE_TEXT\n TASK: str = \"text-to-text\"\n FIX_COST: float = 0\n def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,\n admin_config: AdminConfig = None, options=None):\n def is_input_supported(self, tags, client=None, dvm_config=None):\n def create_request_from_nostr_event(self, event, client=None, dvm_config=None):\n def process(self, request_form):\ndef build_example(name, identifier, admin_config):" }, { "identifier": "summarization_huggingchat", "path": "nostr_dvm/tasks/summarization_huggingchat.py", "snippet": "class TextSummarizationHuggingChat(DVMTaskInterface):\n KIND: int = EventDefinitions.KIND_NIP90_SUMMARIZE_TEXT\n TASK: str = \"summarization\"\n FIX_COST: float = 0\n def __init__(self, name, dvm_config: DVMConfig, nip89config: NIP89Config,\n admin_config: AdminConfig = None, options=None):\n def is_input_supported(self, tags, client=None, dvm_config=None):\n def create_request_from_nostr_event(self, event, client=None, dvm_config=None):\n def process(self, request_form):\ndef build_example(name, identifier, admin_config):" }, { "identifier": "AdminConfig", "path": "nostr_dvm/utils/admin_utils.py", "snippet": "class AdminConfig:\n REBROADCAST_NIP89: bool = False\n UPDATE_PROFILE: bool = False\n DELETE_NIP89: bool = False\n WHITELISTUSER: bool = False\n UNWHITELISTUSER: bool = False\n BLACKLISTUSER: bool = False\n DELETEUSER: bool = False\n LISTDATABASE: bool = False\n ClEANDB: bool = False\n\n USERNPUB: str = \"\"\n LUD16: str = \"\"\n\n EVENTID: str = \"\"\n PRIVKEY: str = \"\"" }, { "identifier": "keep_alive", "path": "nostr_dvm/utils/backend_utils.py", "snippet": "def keep_alive():\n try:\n while True:\n time.sleep(10)\n except KeyboardInterrupt:\n os.kill(os.getpid(), signal.SIGKILL)\n exit(1)" }, { "identifier": "EventDefinitions", "path": "nostr_dvm/utils/definitions.py", "snippet": "class EventDefinitions:\n KIND_DM = 4\n KIND_ZAP = 9735\n KIND_ANNOUNCEMENT = 31990\n KIND_NIP94_METADATA = 1063\n KIND_FEEDBACK = 7000\n KIND_NIP90_EXTRACT_TEXT = 5000\n KIND_NIP90_RESULT_EXTRACT_TEXT = KIND_NIP90_EXTRACT_TEXT + 1000\n KIND_NIP90_SUMMARIZE_TEXT = 5001\n KIND_NIP90_RESULT_SUMMARIZE_TEXT = KIND_NIP90_SUMMARIZE_TEXT + 1000\n KIND_NIP90_TRANSLATE_TEXT = 5002\n KIND_NIP90_RESULT_TRANSLATE_TEXT = 
KIND_NIP90_TRANSLATE_TEXT + 1000\n KIND_NIP90_GENERATE_TEXT = 5050\n KIND_NIP90_RESULT_GENERATE_TEXT = KIND_NIP90_GENERATE_TEXT + 1000\n KIND_NIP90_GENERATE_IMAGE = 5100\n KIND_NIP90_RESULT_GENERATE_IMAGE = KIND_NIP90_GENERATE_IMAGE + 1000\n KIND_NIP90_CONVERT_VIDEO = 5200\n KIND_NIP90_RESULT_CONVERT_VIDEO = KIND_NIP90_CONVERT_VIDEO + 1000\n KIND_NIP90_GENERATE_VIDEO = 5202\n KIND_NIP90_TEXT_TO_SPEECH = 5250\n KIND_NIP90_RESULT_TEXT_TO_SPEECH = KIND_NIP90_TEXT_TO_SPEECH + 1000\n KIND_NIP90_RESULT_GENERATE_VIDEO = KIND_NIP90_GENERATE_VIDEO + 1000\n KIND_NIP90_CONTENT_DISCOVERY = 5300\n KIND_NIP90_RESULT_CONTENT_DISCOVERY = KIND_NIP90_CONTENT_DISCOVERY + 1000\n KIND_NIP90_PEOPLE_DISCOVERY = 5301\n KIND_NIP90_RESULT_PEOPLE_DISCOVERY = KIND_NIP90_PEOPLE_DISCOVERY + 1000\n KIND_NIP90_CONTENT_SEARCH = 5302\n KIND_NIP90_RESULTS_CONTENT_SEARCH = KIND_NIP90_CONTENT_SEARCH + 1000\n KIND_NIP90_GENERIC = 5999\n KIND_NIP90_RESULT_GENERIC = KIND_NIP90_GENERIC + 1000\n ANY_RESULT = [KIND_NIP90_RESULT_EXTRACT_TEXT,\n KIND_NIP90_RESULT_SUMMARIZE_TEXT,\n KIND_NIP90_RESULT_TRANSLATE_TEXT,\n KIND_NIP90_RESULT_GENERATE_TEXT,\n KIND_NIP90_RESULT_GENERATE_IMAGE,\n KIND_NIP90_CONTENT_DISCOVERY,\n KIND_NIP90_PEOPLE_DISCOVERY,\n KIND_NIP90_RESULT_CONVERT_VIDEO,\n KIND_NIP90_RESULT_CONTENT_DISCOVERY,\n KIND_NIP90_RESULT_PEOPLE_DISCOVERY,\n KIND_NIP90_RESULT_GENERATE_VIDEO,\n KIND_NIP90_RESULT_GENERIC]" }, { "identifier": "DVMConfig", "path": "nostr_dvm/utils/dvmconfig.py", "snippet": "class DVMConfig:\n SUPPORTED_DVMS = []\n PRIVATE_KEY: str = \"\"\n PUBLIC_KEY: str = \"\"\n FIX_COST: float = None\n PER_UNIT_COST: float = None\n\n RELAY_LIST = [\"wss://relay.damus.io\", \"wss://nostr-pub.wellorder.net\", \"wss://nos.lol\", \"wss://nostr.wine\",\n \"wss://nostr.mom\", \"wss://nostr.oxtr.dev\", \"wss://relay.nostr.bg\",\n \"wss://relay.f7z.io\", \"wss://pablof7z.nostr1.com\", \"wss://relay.nostr.net\", \"wss://140.f7z.io\",\n \"wss://relay.snort.social\", \"wss://offchain.pub/\", \"wss://relay.nostr.band\"]\n\n RELAY_TIMEOUT = 5\n EXTERNAL_POST_PROCESS_TYPE = PostProcessFunctionType.NONE # Leave this on None, except the DVM is external\n LNBITS_INVOICE_KEY = '' # Will all automatically generated by default, or read from .env\n LNBITS_ADMIN_KEY = '' # In order to pay invoices, e.g. from the bot to DVMs, or reimburse users.\n LNBITS_URL = 'https://lnbits.com'\n LN_ADDRESS = ''\n SCRIPT = ''\n IDENTIFIER = ''\n USE_OWN_VENV = True # Make an own venv for each dvm's process function.Disable if you want to install packages into main venv. 
Only recommended if you dont want to run dvms with different dependency versions\n DB: str\n NEW_USER_BALANCE: int = 0 # Free credits for new users\n NIP89: NIP89Config\n SHOW_RESULT_BEFORE_PAYMENT: bool = False # if this is true show results even when not paid right after autoprocess" }, { "identifier": "build_external_dvm", "path": "nostr_dvm/utils/external_dvm_utils.py", "snippet": "def build_external_dvm(pubkey, task, kind, fix_cost, per_unit_cost, config,\n external_post_process=PostProcessFunctionType.NONE):\n dvm_config = DVMConfig()\n dvm_config.PUBLIC_KEY = PublicKey.from_hex(pubkey).to_hex()\n dvm_config.FIX_COST = fix_cost\n dvm_config.PER_UNIT_COST = per_unit_cost\n dvm_config.EXTERNAL_POST_PROCESS_TYPE = external_post_process\n\n opts = (Options().wait_for_send(True).send_timeout(timedelta(seconds=config.RELAY_TIMEOUT))\n .skip_disconnected_relays(True))\n keys = Keys.from_sk_str(config.PRIVATE_KEY)\n signer = ClientSigner.keys(keys)\n client = Client.with_opts(signer, opts)\n\n\n for relay in config.RELAY_LIST:\n client.add_relay(relay)\n client.connect()\n\n nip89content_str = nip89_fetch_events_pubkey(client, pubkey, kind)\n name = \"External DVM\"\n image = \"https://image.nostr.build/c33ca6fc4cc038ca4adb46fdfdfda34951656f87ee364ef59095bae1495ce669.jpg\"\n about = \"An External DVM with no info\"\n nip90params = {}\n encryption_supported = False\n cashu_accepted = False\n\n if nip89content_str is not None:\n print(nip89content_str)\n nip89content = json.loads(nip89content_str)\n if nip89content.get(\"name\"):\n name = nip89content.get(\"name\")\n if nip89content.get(\"image\"):\n image = nip89content.get(\"image\")\n if nip89content.get(\"about\"):\n about = nip89content.get(\"about\")\n if nip89content.get(\"nip90Params\"):\n nip90params = nip89content[\"nip90Params\"]\n if nip89content.get(\"encryptionSupported\"):\n encryption_supported = nip89content[\"encryptionSupported\"]\n if nip89content.get(\"cashuAccepted\"):\n cashu_accepted = nip89content[\"cashuAccepted\"]\n else:\n print(\"No NIP89 set for \"+ name)\n nip89info = {\n \"name\": name,\n \"image\": image,\n \"about\": about,\n \"encryptionSupported\": encryption_supported,\n \"cashuAccepted\": cashu_accepted,\n \"nip90Params\": nip90params\n }\n nip89config = NIP89Config()\n nip89config.KIND = kind\n nip89config.CONTENT = json.dumps(nip89info)\n\n interface = DVMTaskInterface(name=name, dvm_config=dvm_config, nip89config=nip89config, task=task)\n interface.SUPPORTS_ENCRYPTION = encryption_supported\n interface.ACCEPTS_CASHU = cashu_accepted\n\n return interface" }, { "identifier": "check_and_set_private_key", "path": "nostr_dvm/utils/nostr_utils.py", "snippet": "def check_and_set_private_key(identifier):\n if not os.getenv(\"DVM_PRIVATE_KEY_\" + identifier.upper()):\n pk = Keys.generate().secret_key().to_hex()\n add_pk_to_env_file(\"DVM_PRIVATE_KEY_\" + identifier.upper(), pk)\n return pk\n else:\n return os.getenv(\"DVM_PRIVATE_KEY_\" + identifier.upper())" }, { "identifier": "PostProcessFunctionType", "path": "nostr_dvm/utils/output_utils.py", "snippet": "class PostProcessFunctionType:\n NONE = 0\n LIST_TO_USERS = 1\n LIST_TO_EVENTS = 2" }, { "identifier": "check_and_set_ln_bits_keys", "path": "nostr_dvm/utils/zap_utils.py", "snippet": "def check_and_set_ln_bits_keys(identifier, npub):\n if not os.getenv(\"LNBITS_INVOICE_KEY_\" + identifier.upper()):\n invoicekey, adminkey, walletid, userid, success = create_lnbits_account(identifier)\n add_key_to_env_file(\"LNBITS_INVOICE_KEY_\" + identifier.upper(), 
invoicekey)\n add_key_to_env_file(\"LNBITS_ADMIN_KEY_\" + identifier.upper(), adminkey)\n add_key_to_env_file(\"LNBITS_USER_ID_\" + identifier.upper(), userid)\n add_key_to_env_file(\"LNBITS_WALLET_ID_\" + identifier.upper(), userid)\n\n lnaddress = \"\"\n pin = \"\"\n if os.getenv(\"NOSTDRESS_DOMAIN\") and success != \"failed\":\n print(os.getenv(\"NOSTDRESS_DOMAIN\"))\n lnaddress, pin = make_ln_address_nostdress(identifier, npub, \" \", os.getenv(\"NOSTDRESS_DOMAIN\"))\n add_key_to_env_file(\"LNADDRESS_\" + identifier.upper(), lnaddress)\n add_key_to_env_file(\"LNADDRESS_PIN_\" + identifier.upper(), pin)\n\n return invoicekey, adminkey, userid, walletid, lnaddress\n else:\n return (os.getenv(\"LNBITS_INVOICE_KEY_\" + identifier.upper()),\n os.getenv(\"LNBITS_ADMIN_KEY_\" + identifier.upper()),\n os.getenv(\"LNBITS_USER_ID_\" + identifier.upper()),\n os.getenv(\"LNBITS_WALLET_ID_\" + identifier.upper()),\n os.getenv(\"LNADDRESS_\" + identifier.upper()))" } ]
import os import dotenv from pathlib import Path from sys import platform from nostr_dvm.bot import Bot from nostr_dvm.tasks import videogeneration_replicate_svd, imagegeneration_replicate_sdxl, textgeneration_llmlite, \ trending_notes_nostrband, discovery_inactive_follows, translation_google, textextraction_pdf, \ translation_libretranslate, textextraction_google, convert_media, imagegeneration_openai_dalle, texttospeech, \ imagegeneration_sd21_mlx, advanced_search, textgeneration_huggingchat, summarization_huggingchat from nostr_dvm.utils.admin_utils import AdminConfig from nostr_dvm.utils.backend_utils import keep_alive from nostr_dvm.utils.definitions import EventDefinitions from nostr_dvm.utils.dvmconfig import DVMConfig from nostr_dvm.utils.external_dvm_utils import build_external_dvm from nostr_dvm.utils.nostr_utils import check_and_set_private_key from nostr_dvm.utils.output_utils import PostProcessFunctionType from nostr_dvm.utils.zap_utils import check_and_set_ln_bits_keys from nostr_sdk import Keys
13007
def playground(): # We will run an optional bot that can communicate with the DVMs # Note this is very basic for now and still under development bot_config = DVMConfig() bot_config.PRIVATE_KEY = check_and_set_private_key("bot") npub = Keys.from_sk_str(bot_config.PRIVATE_KEY).public_key().to_bech32() invoice_key, admin_key, wallet_id, user_id, lnaddress = check_and_set_ln_bits_keys("bot", npub) bot_config.LNBITS_INVOICE_KEY = invoice_key bot_config.LNBITS_ADMIN_KEY = admin_key # The dvm might pay failed jobs back bot_config.LNBITS_URL = os.getenv("LNBITS_HOST") # Generate an optional Admin Config, in this case, whenever we give our DVMs this config, they will (re)broadcast # their NIP89 announcement # You can create individual admins configs and hand them over when initializing the dvm, # for example to whilelist users or add to their balance. # If you use this global config, options will be set for all dvms that use it. admin_config = AdminConfig() admin_config.REBROADCAST_NIP89 = False admin_config.LUD16 = lnaddress # Set rebroadcast to true once you have set your NIP89 descriptions and d tags. You only need to rebroadcast once you # want to update your NIP89 descriptions # Update the DVMs (not the bot) profile. For example after you updated the NIP89 or the lnaddress, you can automatically update profiles here. admin_config.UPDATE_PROFILE = False # Spawn some DVMs in the playground and run them # You can add arbitrary DVMs there and instantiate them here # Spawn DVM1 Kind 5000: A local Text Extractor from PDFs pdfextractor = textextraction_pdf.build_example("PDF Extractor", "pdf_extractor", admin_config) # If we don't add it to the bot, the bot will not provide access to the DVM pdfextractor.run() # Spawn DVM2 Kind 5002 Local Text TranslationGoogle, calling the free Google API. translator = translation_google.build_example("Google Translator", "google_translator", admin_config) bot_config.SUPPORTED_DVMS.append(translator) # We add translator to the bot translator.run() # Spawn DVM3 Kind 5002 Local Text TranslationLibre, calling the free LibreTranslateApi, as an alternative. # This will only run and appear on the bot if an endpoint is set in the .env if os.getenv("LIBRE_TRANSLATE_ENDPOINT") is not None and os.getenv("LIBRE_TRANSLATE_ENDPOINT") != "": libre_translator = translation_libretranslate.build_example("Libre Translator", "libre_translator", admin_config) bot_config.SUPPORTED_DVMS.append(libre_translator) # We add translator to the bot libre_translator.run() # Spawn DVM4, this one requires an OPENAI API Key and balance with OpenAI, you will move the task to them and pay # per call. Make sure you have enough balance and the DVM's cost is set higher than what you pay yourself, except, you know, # you're being generous. if os.getenv("OPENAI_API_KEY") is not None and os.getenv("OPENAI_API_KEY") != "": dalle = imagegeneration_openai_dalle.build_example("Dall-E 3", "dalle3", admin_config) bot_config.SUPPORTED_DVMS.append(dalle) dalle.run() if os.getenv("REPLICATE_API_TOKEN") is not None and os.getenv("REPLICATE_API_TOKEN") != "":
def playground(): # We will run an optional bot that can communicate with the DVMs # Note this is very basic for now and still under development bot_config = DVMConfig() bot_config.PRIVATE_KEY = check_and_set_private_key("bot") npub = Keys.from_sk_str(bot_config.PRIVATE_KEY).public_key().to_bech32() invoice_key, admin_key, wallet_id, user_id, lnaddress = check_and_set_ln_bits_keys("bot", npub) bot_config.LNBITS_INVOICE_KEY = invoice_key bot_config.LNBITS_ADMIN_KEY = admin_key # The dvm might pay failed jobs back bot_config.LNBITS_URL = os.getenv("LNBITS_HOST") # Generate an optional Admin Config, in this case, whenever we give our DVMs this config, they will (re)broadcast # their NIP89 announcement # You can create individual admins configs and hand them over when initializing the dvm, # for example to whilelist users or add to their balance. # If you use this global config, options will be set for all dvms that use it. admin_config = AdminConfig() admin_config.REBROADCAST_NIP89 = False admin_config.LUD16 = lnaddress # Set rebroadcast to true once you have set your NIP89 descriptions and d tags. You only need to rebroadcast once you # want to update your NIP89 descriptions # Update the DVMs (not the bot) profile. For example after you updated the NIP89 or the lnaddress, you can automatically update profiles here. admin_config.UPDATE_PROFILE = False # Spawn some DVMs in the playground and run them # You can add arbitrary DVMs there and instantiate them here # Spawn DVM1 Kind 5000: A local Text Extractor from PDFs pdfextractor = textextraction_pdf.build_example("PDF Extractor", "pdf_extractor", admin_config) # If we don't add it to the bot, the bot will not provide access to the DVM pdfextractor.run() # Spawn DVM2 Kind 5002 Local Text TranslationGoogle, calling the free Google API. translator = translation_google.build_example("Google Translator", "google_translator", admin_config) bot_config.SUPPORTED_DVMS.append(translator) # We add translator to the bot translator.run() # Spawn DVM3 Kind 5002 Local Text TranslationLibre, calling the free LibreTranslateApi, as an alternative. # This will only run and appear on the bot if an endpoint is set in the .env if os.getenv("LIBRE_TRANSLATE_ENDPOINT") is not None and os.getenv("LIBRE_TRANSLATE_ENDPOINT") != "": libre_translator = translation_libretranslate.build_example("Libre Translator", "libre_translator", admin_config) bot_config.SUPPORTED_DVMS.append(libre_translator) # We add translator to the bot libre_translator.run() # Spawn DVM4, this one requires an OPENAI API Key and balance with OpenAI, you will move the task to them and pay # per call. Make sure you have enough balance and the DVM's cost is set higher than what you pay yourself, except, you know, # you're being generous. if os.getenv("OPENAI_API_KEY") is not None and os.getenv("OPENAI_API_KEY") != "": dalle = imagegeneration_openai_dalle.build_example("Dall-E 3", "dalle3", admin_config) bot_config.SUPPORTED_DVMS.append(dalle) dalle.run() if os.getenv("REPLICATE_API_TOKEN") is not None and os.getenv("REPLICATE_API_TOKEN") != "":
sdxlreplicate = imagegeneration_replicate_sdxl.build_example("Stable Diffusion XL", "replicate_sdxl", admin_config)
2
2023-11-17 18:32:56+00:00
16k
IBM/oper8
oper8/watch_manager/python_watch_manager/filters/filters.py
[ { "identifier": "KubeEventType", "path": "oper8/deploy_manager/kube_event.py", "snippet": "class KubeEventType(Enum):\n \"\"\"Enum for all possible kubernetes event types\"\"\"\n\n DELETED = \"DELETED\"\n MODIFIED = \"MODIFIED\"\n ADDED = \"ADDED\"" }, { "identifier": "ManagedObject", "path": "oper8/managed_object.py", "snippet": "class ManagedObject: # pylint: disable=too-many-instance-attributes\n \"\"\"Basic struct to represent a managed kubernetes object\"\"\"\n\n def __init__(self, definition):\n self.kind = definition.get(\"kind\")\n self.metadata = definition.get(\"metadata\", {})\n self.name = self.metadata.get(\"name\")\n self.namespace = self.metadata.get(\"namespace\")\n self.uid = self.metadata.get(\"uid\", uuid.uuid4())\n self.resource_version = self.metadata.get(\"resourceVersion\")\n self.api_version = definition.get(\"apiVersion\")\n self.definition = definition\n\n # If resource is not list then check name\n if KUBE_LIST_IDENTIFIER not in self.kind:\n assert self.name is not None, \"No name found\"\n\n assert self.kind is not None, \"No kind found\"\n assert self.api_version is not None, \"No apiVersion found\"\n\n def get(self, *args, **kwargs):\n \"\"\"Pass get calls to the objects definition\"\"\"\n return self.definition.get(*args, **kwargs)\n\n def __str__(self):\n return f\"{self.api_version}/{self.kind}/{self.name}\"\n\n def __repr__(self):\n return str(self)\n\n def __hash__(self):\n \"\"\"Hash explicitly excludes the definition so that the object's\n identifier in a map can be based only on the unique identifier of the\n resource in the cluster. If the original resource did not provide a unique\n identifier then use the apiVersion, kind, and name\n \"\"\"\n return hash(self.metadata.get(\"uid\", str(self)))\n\n def __eq__(self, other):\n return hash(self) == hash(other)" }, { "identifier": "ReconcileManager", "path": "oper8/reconcile.py", "snippet": "class ReconcileManager: # pylint: disable=too-many-lines\n \"\"\"This class manages reconciliations for an instance of Oper8. It's\n primary function is to run reconciles given a CR manifest, Controller,\n and the current cluster state via a DeployManager.\n \"\"\"\n\n ## Construction ############################################################\n\n def __init__(\n self,\n home_dir: str = None,\n deploy_manager: Optional[DeployManagerBase] = None,\n enable_vcs: Optional[bool] = None,\n reimport_controller: Optional[bool] = True,\n ):\n \"\"\"The constructor sets up the properties used across every\n reconcile and checks that the current config is valid.\n\n Args:\n home_dir: Optional[str]=None\n The root directory for importing controllers or VCS checkout\n deploy_manager: Optional[DeployManager]=None\n Deploy manager to use. 
If not given, a new DeployManager will\n be created for each reconcile.\n enable_vcs: Optional[bool]=True\n Parameter to manually control the state of VCS on a per instance\n basis\n reimport_controller: Optional[bool]=None\n Parameter to manually control if a controller needs to be reimported each\n reconcile.\n \"\"\"\n\n if home_dir:\n self.home_dir = home_dir\n elif config.vcs.enabled:\n self.home_dir = config.vcs.repo\n else:\n self.home_dir = os.getcwd()\n\n self.vcs = None\n\n # If enable_vcs is not provided than default to\n # config\n if enable_vcs is None:\n enable_vcs = config.vcs.enabled\n\n if enable_vcs:\n assert_config(\n config.vcs.repo,\n \"Can not enable vcs without supply source repo at vcs.repo\",\n )\n assert_config(\n config.vcs.dest,\n \"Cannot require enable vcs without providing a destination\",\n )\n vcs_checkout_methods = [method.value for method in VCSCheckoutMethod]\n assert_config(\n config.vcs.checkout_method in vcs_checkout_methods,\n f\"VCS checkout method must be one of the following {vcs_checkout_methods}\",\n )\n\n self.vcs = VCS(self.home_dir)\n\n # Ensure config is setup correctly for strict_versioning\n if config.strict_versioning:\n assert_config(\n config.supported_versions is not None,\n \"Must provide supported_versions with strict_versioning=True\",\n )\n assert_config(\n config.vcs.field is not None,\n \"Must provide vcs.field with strict_versioning=True\",\n )\n\n self.deploy_manager = deploy_manager\n self.reimport_controller = reimport_controller\n\n ## Reconciliation ############################################################\n\n @alog.logged_function(log.info)\n @alog.timed_function(log.info, \"Reconcile finished in: \")\n def reconcile(\n self,\n controller_info: CONTROLLER_INFO,\n resource: Union[dict, aconfig.Config],\n is_finalizer: bool = False,\n ) -> ReconciliationResult:\n \"\"\"This is the main entrypoint for reconciliations and contains the\n core implementation. The general reconcile path is as follows:\n\n 1. Parse the raw CR manifest\n 2. Setup logging based on config with overrides from CR\n 3. Check if the CR is paused and for strict versioning\n 4. Setup directory if VCS is enabled\n 5. Import and construct the Controller\n 6. Setup the DeployManager and Session objects\n 7. Run the Controller reconcile\n\n Args:\n controller_info: CONTROLLER_INFO\n The description of a controller. See CONTROLLER_INFO for\n more information\n resource: Union[dict, aconfig.Config]\n A raw representation of the resource to be reconciled\n is_finalizer: bool=False\n Whether the resource is being deleted\n\n Returns:\n reconcile_result: ReconciliationResult\n The result of the reconcile\n \"\"\"\n\n # Parse the full CR content\n cr_manifest = self.parse_manifest(resource)\n\n # generate id unique to this session\n reconcile_id = self.generate_id()\n\n # Initialize logging prior to any other work\n self.configure_logging(cr_manifest, reconcile_id)\n\n # If paused, do nothing and don't requeue\n if self._is_paused(cr_manifest):\n log.info(\"CR is paused. 
Exiting reconciliation\")\n result = ReconciliationResult(requeue=False, requeue_params=RequeueParams())\n return result\n\n # Check strict versioning before continuing\n if config.strict_versioning:\n self._check_strict_versioning(cr_manifest)\n\n # Check if VCS is enabled and then attempt to checkout\n if config.vcs.enabled:\n self.setup_vcs(cr_manifest)\n\n # Import controller and setup the instance\n controller = self.setup_controller(controller_info)\n\n # Configure deploy manager on a per reconcile basis for\n # owner references unless a manager is provided on initialization\n deploy_manager = self.setup_deploy_manager(cr_manifest)\n\n # Setup Session\n session = self.setup_session(\n controller, cr_manifest, deploy_manager, reconcile_id\n )\n\n # Run the controller reconcile\n result = self.run_controller(controller, session, is_finalizer)\n\n return result\n\n def safe_reconcile(\n self,\n controller_info: CONTROLLER_INFO,\n resource: dict,\n is_finalizer: bool = False,\n ) -> ReconciliationResult:\n \"\"\"\n This function calls out to reconcile but catches any errors thrown. This\n function guarantees a safe result which is needed by some Watch Managers\n\n Args:\n controller_info: CONTROLLER_INFO\n The description of a controller. See CONTROLLER_INFO for\n more information\n resource: Union[dict, aconfig.Config]\n A raw representation of the reconcile\n is_finalize: bool=False\n Whether the resource is being deleted\n\n Returns:\n reconcile_result: ReconciliationResult\n The result of the reconcile\n\n \"\"\"\n\n try:\n return self.reconcile(controller_info, resource, is_finalizer)\n\n # VCSMultiProcessError is an expected error caused by oper8 which should\n # not be handled by the exception handling code\n except VCSMultiProcessError as exc:\n # Requeue after ~7.5 seconds. Add randomness to avoid\n # repeated conflicts\n requeue_time = 5 + random.uniform(0, 5)\n params = RequeueParams(\n requeue_after=datetime.timedelta(seconds=requeue_time)\n )\n log.debug(\"VCS Multiprocessing Error Detected: {%s}\", exc, exc_info=True)\n log.warning(\n \"VCS Setup failed due to other process. 
Requeueing in %ss\",\n requeue_time,\n )\n return ReconciliationResult(\n requeue=True, requeue_params=params, exception=exc\n )\n\n # Capture all generic exceptions\n except Exception as exc: # pylint: disable=broad-except\n log.warning(\"Handling caught error in reconcile: %s\", exc, exc_info=True)\n error = exc\n\n if config.manage_status:\n try:\n self._update_error_status(resource, error)\n log.debug(\"Update CR status with error message\")\n except Exception as exc: # pylint: disable=broad-except\n log.error(\"Failed to update status: %s\", exc, exc_info=True)\n\n # If we got to this return it means there was an\n # exception during reconcile and we should requeue\n # with the default backoff period\n log.info(\"Requeuing CR due to error during reconcile\")\n return ReconciliationResult(\n requeue=True, requeue_params=RequeueParams(), exception=error\n )\n\n ## Reconciliation Stages ############################################################\n\n @classmethod\n def parse_manifest(cls, resource: Union[dict, aconfig.Config]) -> aconfig.Config:\n \"\"\"Parse a raw resource into an aconfig Config\n\n Args:\n resource: Union[dict, aconfig.Config])\n The resource to be parsed into a manifest\n\n Returns\n cr_manifest: aconfig.Config\n The parsed and validated config\n \"\"\"\n try:\n cr_manifest = aconfig.Config(resource, override_env_vars=False)\n except (ValueError, SyntaxError, AttributeError) as exc:\n raise ValueError(\"Failed to parse full_cr\") from exc\n\n return cr_manifest\n\n @classmethod\n def configure_logging(cls, cr_manifest: aconfig.Config, reconciliation_id: str):\n \"\"\"Configure the logging for a given reconcile\n\n Args:\n cr_manifest: aconfig.Config\n The resource to get annotation overrides from\n reconciliation_id: str\n The unique id for the reconciliation\n \"\"\"\n\n # Fetch the annotations for logging\n # NOTE: We use safe fetching here because this happens before CR\n # verification in the Session constructor\n annotations = cr_manifest.get(\"metadata\", {}).get(\"annotations\", {})\n default_level = annotations.get(\n constants.LOG_DEFAULT_LEVEL_NAME, config.log_level\n )\n\n filters = annotations.get(constants.LOG_FILTERS_NAME, config.log_filters)\n log_json = annotations.get(constants.LOG_JSON_NAME, str(config.log_json))\n log_thread_id = annotations.get(\n constants.LOG_THREAD_ID_NAME, str(config.log_thread_id)\n )\n\n # Convert boolean args\n log_json = (log_json or \"\").lower() == \"true\"\n log_thread_id = (log_thread_id or \"\").lower() == \"true\"\n\n # Keep the old handler. 
This is useful if running with ansible as\n # it will preserve the handler generator set up to log to a file\n # since ansible captures all logging output\n handler_generator = None\n if logging.root.handlers:\n old_handler = logging.root.handlers[0]\n\n def handler_generator():\n return old_handler\n\n alog.configure(\n default_level=default_level,\n filters=filters,\n formatter=Oper8JsonFormatter(cr_manifest, reconciliation_id)\n if log_json\n else \"pretty\",\n thread_id=log_thread_id,\n handler_generator=handler_generator,\n )\n\n @classmethod\n def generate_id(cls) -> str:\n \"\"\"Generates a unique human readable id for this reconciliation\n\n Returns:\n id: str\n A unique base32 encoded id\n \"\"\"\n uuid4 = uuid.uuid4()\n base32_str = base64.b32encode(uuid4.bytes).decode(\"utf-8\")\n reconcile_id = base32_str[:22]\n log.debug(\"Generated reconcile id: %s\", reconcile_id)\n return reconcile_id\n\n def setup_vcs(self, cr_manifest: aconfig.Config):\n \"\"\"Setups the VCS directory and sys.path for a reconcile.\n This function also ensures that the version is valid if\n config.strict_versioning is enabled.\n\n Args:\n cr_manifest: aconfig.Config\n The cr manifest to pull the requested version from.\n \"\"\"\n version = get_manifest_version(cr_manifest)\n if not version:\n raise ValueError(\"CR Manifest has no version\")\n\n log.debug(\n \"Setting up working directory with src: %s and version: %s\",\n self.home_dir,\n version,\n )\n working_dir = self._setup_directory(cr_manifest, version)\n\n # Construct working dir path from vcs and git directory\n if config.vcs.module_dir:\n module_path = pathlib.Path(config.vcs.module_dir)\n working_dir = working_dir / module_path\n\n if not working_dir.is_dir():\n log.error(\n \"Working directory %s could not be found. Invalid module path\",\n working_dir,\n )\n raise ConfigError(\n f\"Module path: '{module_path}' could not be found in repository\"\n )\n\n log.debug4(\"Changing working directory to %s\", working_dir)\n os.chdir(working_dir)\n sys.path.insert(0, str(working_dir))\n\n def setup_controller(\n self, controller_info: CONTROLLER_INFO\n ) -> CONTROLLER_CLASS_TYPE:\n \"\"\"\n Import the requested Controller class and enable any compatibility layers\n\n Args:\n controller_info:CONTROLLER_INFO\n The description of a controller. 
See CONTROLLER_INFO for\n more information\n Returns:\n controller:\n The required Controller Class\n \"\"\"\n\n # Local\n from .controller import ( # pylint: disable=import-outside-toplevel, cyclic-import\n Controller,\n )\n\n # If controller info is already a constructed controller then\n # skip importing\n if isinstance(controller_info, Controller):\n return controller_info\n\n controller_class = self._import_controller(controller_info)\n return self._configure_controller(controller_class)\n\n def setup_deploy_manager(self, cr_manifest: aconfig.Config) -> DeployManagerBase:\n \"\"\"\n Configure a deploy_manager for a reconcile given a manifest\n\n Args:\n cr_manifest: aconfig.Config\n The resource to be used as an owner_ref\n\n Returns:\n deploy_manager: DeployManagerBase\n The deploy_manager to be used during reconcile\n \"\"\"\n if self.deploy_manager:\n return self.deploy_manager\n\n if config.dry_run:\n log.debug(\"Using DryRunDeployManager\")\n return DryRunDeployManager()\n\n log.debug(\"Using OpenshiftDeployManager\")\n return OpenshiftDeployManager(owner_cr=cr_manifest)\n\n def setup_session(\n self,\n controller: CONTROLLER_TYPE,\n cr_manifest: aconfig.Config,\n deploy_manager: DeployManagerBase,\n reconciliation_id: str,\n ) -> Session:\n \"\"\"Construct the session, including gathering the backend config and any temp patches\n\n Args:\n controller: Controller\n The controller class being reconciled\n cr_manifest: aconfig.Config\n The resource being reconciled\n deploy_manager: DeployManagerBase\n The deploy manager used in the cluster\n reconciliation_id: str\n The id for the reconcile\n\n Return:\n session: Session\n The session for reconcile\n \"\"\"\n # Get backend config for reconciliation\n controller_defaults = controller.get_config_defaults()\n reconciliation_config = self._get_reconcile_config(\n cr_manifest=cr_manifest,\n deploy_manager=deploy_manager,\n controller_defaults=controller_defaults,\n )\n log.debug4(\"Gathered Config: %s\", reconciliation_config)\n\n # Get Temporary patches\n patches = self._get_temp_patches(deploy_manager, cr_manifest)\n log.debug3(\"Found %d patches\", len(patches))\n\n # Get the complete CR Manifest including defaults\n cr_manifest_defaults = controller.get_cr_manifest_defaults()\n full_cr_manifest = merge_configs(\n aconfig.Config(cr_manifest_defaults),\n cr_manifest,\n )\n\n return Session(\n reconciliation_id=reconciliation_id,\n cr_manifest=full_cr_manifest,\n config=reconciliation_config,\n deploy_manager=deploy_manager,\n temporary_patches=patches,\n )\n\n def run_controller(\n self, controller: CONTROLLER_TYPE, session: Session, is_finalizer: bool\n ) -> ReconciliationResult:\n \"\"\"Run the Controller's reconciliation or finalizer with the constructed Session.\n This function also updates the CR status and handles requeue logic.\n\n Args:\n controller: Controller\n The Controller being reconciled\n session: Session\n The current Session state\n is_finalizer:\n Whether the resource is being deleted\n\n Returns:\n reconciliation_result: ReconciliationResult\n The result of the reconcile\n \"\"\"\n log.info(\n \"%s resource %s/%s/%s\",\n \"Finalizing\" if is_finalizer else \"Reconciling\",\n session.kind,\n session.namespace,\n session.name,\n )\n\n # Ensure the resource has the proper finalizers\n if controller.has_finalizer:\n add_finalizer(session, controller.finalizer)\n\n # Update the Resource status\n if config.manage_status:\n self._update_reconcile_start_status(session)\n\n # Reconcile the controller\n completion_state = 
controller.run_reconcile(\n session,\n is_finalizer=is_finalizer,\n )\n\n if config.manage_status:\n self._update_reconcile_completion_status(session, completion_state)\n\n # Check if the controller session should requeue\n requeue, requeue_params = controller.should_requeue(session)\n if not requeue_params:\n requeue_params = RequeueParams()\n\n # Remove managed finalizers if not requeuing\n if not requeue and is_finalizer and controller.has_finalizer:\n remove_finalizer(session, controller.finalizer)\n\n return ReconciliationResult(requeue=requeue, requeue_params=requeue_params)\n\n ## Implementation Details ############################################################\n\n @classmethod\n def _is_paused(cls, cr_manifest: aconfig.Config) -> bool:\n \"\"\"Check if a manifest has a paused annotation\n\n Args:\n cr_manifest: aconfig.Config\n The manifest becking checked\n\n Returns:\n is_paused: bool\n If the manifest contains the paused annotation\n \"\"\"\n annotations = cr_manifest.metadata.get(\"annotations\", {})\n paused = annotations.get(constants.PAUSE_ANNOTATION_NAME)\n return paused and paused.lower() == \"true\"\n\n def _check_strict_versioning(self, cr_manifest: aconfig.Config):\n \"\"\"Check the version against config and vcs directory\n\n Args:\n version_directory: str\n The repo directory to check\n version: str\n The version from the manifest\n \"\"\"\n version = get_manifest_version(cr_manifest)\n if not version:\n raise ValueError(\"CR Manifest has no version\")\n\n # Ensure version is in list of supported versions\n assert_config(\n version in config.supported_versions,\n f\"Unsupported version: {version}\",\n )\n\n # If VCS is enabled ensure the branch or tag exists\n if self.vcs:\n repo_versions = self.vcs.list_refs()\n assert_config(\n version in repo_versions,\n f\"Version not found in repo: {version}\",\n )\n log.debug3(\"Supported VCS Versions: %s\", repo_versions)\n\n def _setup_directory(\n self, cr_manifest: aconfig.Config, version: str\n ) -> pathlib.Path:\n \"\"\"Construct the VCS directory from the cr_manifest and version. 
Then\n checkout the directory\n\n Args:\n cr_manifest: aconfig.Config\n The manifest to be used for the checkout path\n version: str\n The version to checkout\n\n Returns:\n destination_directory: pathlib.Path\n The destination directory for the checkout\n \"\"\"\n\n # Generate checkout directory and ensure path exists\n def sanitize_for_path(path):\n keepcharacters = (\" \", \".\", \"_\")\n return \"\".join(\n c for c in path if c.isalnum() or c in keepcharacters\n ).rstrip()\n\n # Setup destination templating to allow for CR specific checkout paths\n # The entirety of the cr_manifest is included as a dict as well as some\n # custom keys\n template_mappings = {\n # Include the entire dict first so that the sanitized default values\n # take precedence\n **cr_manifest,\n \"version\": version,\n \"kind\": sanitize_for_path(cr_manifest.kind),\n \"apiVersion\": sanitize_for_path(cr_manifest.apiVersion),\n \"namespace\": sanitize_for_path(cr_manifest.metadata.namespace),\n \"name\": sanitize_for_path(cr_manifest.metadata.name),\n }\n\n # Get the checkout directory and method\n try:\n formatted_path = config.vcs.dest.format(**template_mappings)\n except KeyError as exc:\n log.warning(\n \"Invalid key: %s found in vcs destination template\", exc, exc_info=True\n )\n raise ConfigError(\"Invalid Key found in vcs destination template\") from exc\n\n checkout_dir = pathlib.Path(formatted_path)\n checkout_method = VCSCheckoutMethod(config.vcs.checkout_method)\n\n log.debug2(\n \"Checking out into directory %s with method %s\",\n checkout_dir,\n checkout_method.value,\n )\n self.vcs.checkout_ref(version, checkout_dir, checkout_method)\n return checkout_dir\n\n def _import_controller(\n self, controller_info: CONTROLLER_INFO\n ) -> CONTROLLER_CLASS_TYPE:\n \"\"\"Parse the controller info and reimport the controller\n\n Args:\n controller_info:CONTROLLER_INFO\n The description of a controller. See CONTROLLER_INFO for\n more information\n Returns:\n controller_class: Type[Controller]\n The reimported Controller\n\n \"\"\"\n log.debug2(\"Parsing controller_info\")\n if isinstance(controller_info, str):\n class_module_parts = controller_info.rsplit(\".\", maxsplit=1)\n assert_config(\n len(class_module_parts) == 2,\n f\"Invalid controller_class [{controller_info}]. 
Format is <module>.<class>\",\n )\n module_name, class_name = class_module_parts\n else:\n class_name = controller_info.__name__\n module_name = controller_info.__module__\n\n # Reimport module if reimporting is enabled and if it already exists\n if self.reimport_controller and module_name in sys.modules:\n log.debug2(\"UnImporting controller module: %s\", module_name)\n sys.modules.pop(module_name)\n\n # UnImport the controller and any parent modules\n # so controller can be reimported from the most\n # recent sys path\n module_parts = module_name.split(\".\")\n for i in range(1, len(module_parts)):\n parent_module = \".\".join(module_parts[:-i])\n if parent_module in sys.modules:\n log.debug3(\"UnImporting module: %s\", parent_module)\n sys.modules.pop(parent_module, None)\n\n log.debug2(\"Attempting to import [%s.%s]\", module_name, class_name)\n\n # Attempt to import the module\n try:\n app_module = importlib.import_module(module_name)\n if not hasattr(app_module, class_name):\n raise ConfigError(\n f\"Invalid controller_class [{class_name}].\"\n f\" Class not found in module [{module_name}]\"\n )\n controller_class = getattr(app_module, class_name)\n\n # Import controller in function to avoid circular imports\n # Local\n from .controller import ( # pylint: disable=import-outside-toplevel\n Controller,\n )\n\n if not issubclass(controller_class, Controller):\n raise ConfigError(\n f\"Invalid controller_class [{module_name}.{class_name}].\"\n f\" [{class_name}] is not a Controller\"\n )\n\n except ImportError as exc:\n log.error(\n \"Failed to import [%s.%s]. Failed to import [%s]\",\n module_name,\n class_name,\n module_name,\n exc_info=True,\n )\n raise ConfigError(\"Invalid Controller Class Specified\") from exc\n\n log.debug(\n \"Imported Controller %s from file %s\",\n controller_class,\n sys.modules[controller_class.__module__].__file__,\n )\n\n return controller_class\n\n def _configure_controller(\n self, controller_class: CONTROLLER_CLASS_TYPE\n ) -> CONTROLLER_TYPE:\n \"\"\"Construct the Controller Class\n\n Args:\n controller_class: CONTROLLER_CLASS_TYPE\n The Controller class to be reconciled\n\n Returns:\n controller: Controller\n The constructed Controller\n\n \"\"\"\n log.debug3(\"Constructing controller\")\n controller = controller_class()\n return controller\n\n def _get_reconcile_config(\n self,\n cr_manifest: aconfig.Config,\n deploy_manager: DeployManagerBase,\n controller_defaults: aconfig.Config,\n ) -> aconfig.Config:\n \"\"\"Construct the flattened backend config for this reconciliation\n starting with a deepcopy of the base and merge in overrides from the CR\n\n Args:\n cr_manifest: aconfig.Config:\n The manifest to get overrides from\n deploy_manager: DeployManagerBase:\n The deploy manager to get the default configmap config\n controller_defaults: aconfig.Config:\n The config defaults provided by the controller class\n\n Returns:\n reconcile_config: aconfig.Config\n The reconciliation config\n \"\"\"\n metadata = cr_manifest.get(\"metadata\", {})\n annotations = metadata.get(\"annotations\", {})\n namespace = metadata.get(\"namespace\")\n cr_config_defaults = cr_manifest.get(constants.CONFIG_OVERRIDES, {})\n annotation_config_defaults = {}\n if constants.CONFIG_DEFAULTS_ANNOTATION_NAME in annotations:\n log.debug(\"Pulling config_defaults based on annotation\")\n config_defaults_name = annotations[\n constants.CONFIG_DEFAULTS_ANNOTATION_NAME\n ]\n\n # Allow sub-keys to be deliniated by \"/\"\n parts = config_defaults_name.split(\"/\")\n config_defaults_cm_name 
= parts[0]\n\n log.debug2(\n \"Reading config_defaults from ConfigMap [%s]\", config_defaults_cm_name\n )\n success, content = deploy_manager.get_object_current_state(\n kind=\"ConfigMap\",\n name=config_defaults_cm_name,\n namespace=namespace,\n api_version=\"v1\",\n )\n assert_cluster(success, \"Failed to look up config defaults form ConfigMap\")\n assert_config(\n content is not None,\n f\"Did not find configured config defaults ConfigMap: {config_defaults_cm_name}\",\n )\n assert_config(\"data\" in content, \"Got ConfigMap content with out 'data'\")\n config_defaults_content = content[\"data\"]\n assert_config(\n isinstance(config_defaults_content, dict),\n f\"Incorrectly formatted config_defaults ConfigMap: {config_defaults_cm_name}\",\n )\n\n # Parse as a Config\n log.debug2(\"Parsing app config dict\")\n annotation_config_defaults = aconfig.Config(\n config_defaults_content, override_env_vars=False\n )\n\n return merge_configs(\n copy.deepcopy(controller_defaults),\n merge_configs(annotation_config_defaults, cr_config_defaults),\n )\n\n def _get_temp_patches( # pylint: disable=too-many-locals\n self, deploy_manager: DeployManagerBase, cr_manifest: aconfig.Config\n ) -> List[aconfig.Config]:\n \"\"\"Fetch the ordered list of temporary patches that should apply to this\n rollout.\n\n Args:\n deploy_manager: DeployManagerBase\n The DeployManager used to get the current temporary patches\n cr_manifest: aconfig.Config\n The manifest of this reconciliation\n \"\"\"\n\n # Look for patch annotations on the CR\n patch_annotation = (\n cr_manifest.get(\"metadata\", {})\n .get(\"annotations\", {})\n .get(constants.TEMPORARY_PATCHES_ANNOTATION_NAME, \"{}\")\n )\n log.debug3(\"Raw patch annotation: %s\", patch_annotation)\n try:\n raw_patches = json.loads(patch_annotation)\n if not isinstance(raw_patches, dict):\n msg = f\"Patches annotation not a dict: {raw_patches}\"\n log.error(msg)\n raise RolloutError(msg)\n patches = {}\n for patch_name, patch_meta in raw_patches.items():\n patch_meta[\"timestamp\"] = dateutil.parser.parse(patch_meta[\"timestamp\"])\n patches[patch_name] = patch_meta\n if \"api_version\" not in patch_meta:\n raise KeyError(\"api_version\")\n except json.decoder.JSONDecodeError as err:\n msg = f\"Could not parse patches from annotation [{patch_annotation}]\"\n log.error(msg)\n raise RolloutError(msg) from err\n except dateutil.parser.ParserError as err:\n msg = f\"Failed to parse patch timestamp [{patch_annotation}]\"\n log.error(msg)\n raise RolloutError(msg) from err\n except KeyError as err:\n msg = f\"Patch meta incorrectly formatted [{patch_annotation}]\"\n log.error(msg)\n raise RolloutError(msg) from err\n\n # Fetch the state of each patch and add it to the output, sorted by\n # timestamp with the earliest first\n temporary_patches = []\n for patch_name, patch_meta in sorted(\n list(patches.items()), key=lambda x: x[1][\"timestamp\"]\n ):\n # Do the fetch\n log.debug2(\"Fetching patch [%s/%s]\", patch_name, patch_meta[\"timestamp\"])\n namespace = cr_manifest.get(\"metadata\", {}).get(\"namespace\")\n patch_api_version = patch_meta[\"api_version\"]\n patch_kind = patch_meta.get(\"kind\", \"TemporaryPatch\")\n success, content = deploy_manager.get_object_current_state(\n kind=patch_kind,\n name=patch_name,\n api_version=patch_api_version,\n namespace=namespace,\n )\n assert_cluster(success, f\"Failed to fetch patch content for [{patch_name}]\")\n assert_config(content is not None, f\"Patch not found [{patch_name}]\")\n\n # Pull the patch spec and add it to the list\n 
assert_config(\n content.get(\"spec\") is not None,\n f\"No spec found in patch [{patch_name}]\",\n )\n temporary_patches.append(aconfig.Config(content, override_env_vars=False))\n\n return temporary_patches\n\n ## Status Details ############################################################\n\n def _update_resource_status(\n self, deploy_manager: DeployManagerBase, manifest: aconfig.Config, **kwargs\n ) -> dict:\n \"\"\"Helper function to update the status of a resource given a deploy_manager, manifest\n and status kwargs\n\n Args:\n deploy_manager: DeployManagerBase\n The DeployManager used to update the resource\n manifest: aconfig.Config\n The manifest of the resource being updated\n **kwargs:\n The key word arguments passed to update_resource_status\n\n Returns:\n updated_status: dict\n The updated status applied to the resource\n \"\"\"\n return status.update_resource_status(\n deploy_manager,\n manifest.kind,\n manifest.api_version,\n manifest.metadata.name,\n manifest.metadata.namespace,\n **kwargs,\n )\n\n def _update_reconcile_start_status(self, session: Session):\n \"\"\"Update the status for a resource at the start of a reconciliation\n\n Args:\n session: Session\n The session of the reconcile which includes the DeployManager and resource\n\n \"\"\"\n ready_condition = status.get_condition(status.READY_CONDITION, session.status)\n ready_reason = ready_condition.get(\"reason\")\n if ready_reason is None or session.current_version is None:\n ready_reason = status.ReadyReason.INITIALIZING\n\n optional_kwargs = {}\n if session.current_version and session.version != session.current_version:\n log.debug(\n \"Version change detected: %s -> %s\",\n session.current_version,\n session.version,\n )\n optional_kwargs = {\n \"updating_reason\": status.UpdatingReason.VERSION_CHANGE,\n \"updating_message\": \"Version Change Started: \"\n f\"[{session.current_version}] -> [{session.version}]\",\n }\n ready_reason = status.ReadyReason.IN_PROGRESS\n\n self._update_resource_status(\n session.deploy_manager,\n session.cr_manifest,\n ready_reason=ready_reason,\n ready_message=ready_condition.get(\"message\", \"Initial Rollout Started\"),\n supported_versions=config.supported_versions,\n **optional_kwargs,\n )\n\n def _update_reconcile_completion_status(\n self, session: Session, completion_state: CompletionState\n ):\n \"\"\"Perform CR status updates based on the results of the rollout steps. The status logic is\n as follows:\n 1. Initial Rollout: Ready-INITIALIZING, Updating-VERIFY_WAIT\n 2. Everything complete: Ready-STABLE, Updating-STABLE\n 3. Everything except after_verify: Ready-IN_PROGRESS, Updating-STABLE\n 4. 
other: Updating-VERIFY_WAIT\n\n Args:\n session: Session\n The session of the reconcile which includes the DeployManager and resource\n completion_state: CompletionState\n The result of the rollout\n \"\"\"\n status_update = {\"component_state\": completion_state}\n\n # If everything completed and verified, set ready and updating to STABLE\n # and set the status's reconciled version to the desired version\n if completion_state.verify_completed():\n status_update[\"ready_reason\"] = status.ReadyReason.STABLE\n status_update[\"ready_message\"] = \"Verify Complete\"\n status_update[\"updating_reason\"] = status.UpdatingReason.STABLE\n status_update[\"updating_message\"] = \"Rollout Complete\"\n status_update[\"version\"] = session.version\n\n # If the completion_state didn't fail then update the ready condition with\n # in_progress and the updating condition with verification incomplete\n else:\n current_status = session.get_status()\n\n # If not initializing then update the ready condition with in_progress\n current_ready_cond = status.get_condition(\n status.READY_CONDITION, current_status\n )\n if (\n current_ready_cond.get(\"reason\")\n != status.ReadyReason.INITIALIZING.value\n ):\n status_update[\"ready_reason\"] = status.ReadyReason.IN_PROGRESS\n status_update[\"ready_message\"] = \"Verify InProgress\"\n\n status_update[\"updating_reason\"] = status.UpdatingReason.VERIFY_WAIT\n status_update[\"updating_message\"] = \"Component verification incomplete\"\n\n log.debug3(\"Updating status after reconcile: %s\", status_update)\n self._update_resource_status(\n session.deploy_manager, session.cr_manifest, **status_update\n )\n\n def _update_error_status(\n self, resource: Union[dict, aconfig.Config], error: Exception\n ) -> dict:\n \"\"\"Update the status of a resource after an error occurred. This function\n setups up it's own deploy manager and parses the resource. 
This way errors at any\n phase of reconciliation can still get updated\n\n Args:\n resource: Union[dict, aconfig.Config]\n The resource that's status is being updated\n error: Exception\n The exception that stopped the reconciliation\n\n Returns:\n status: dict\n The updated status after the error message\n \"\"\"\n cr_manifest = self.parse_manifest(resource)\n deploy_manager = self.setup_deploy_manager(resource)\n\n # Get the completion state if possible\n component_state = getattr(error, \"completion_state\", None)\n\n # Expected Oper8 Errors\n if isinstance(error, PreconditionError):\n status_update = {\n \"updating_reason\": status.UpdatingReason.PRECONDITION_WAIT,\n \"updating_message\": str(error),\n \"component_state\": component_state,\n }\n elif isinstance(error, (VerificationError, Oper8ExpectedError)):\n status_update = {\n \"updating_reason\": status.UpdatingReason.VERIFY_WAIT,\n \"updating_message\": str(error),\n \"component_state\": component_state,\n }\n elif isinstance(error, ConfigError):\n status_update = {\n \"ready_reason\": status.ReadyReason.CONFIG_ERROR,\n \"ready_message\": str(error),\n \"updating_reason\": status.UpdatingReason.ERRORED,\n \"updating_message\": str(error),\n \"component_state\": component_state,\n }\n elif isinstance(error, ClusterError):\n status_update = {\n \"updating_reason\": status.UpdatingReason.CLUSTER_ERROR,\n \"updating_message\": str(error),\n \"component_state\": component_state,\n }\n\n elif isinstance(error, (RolloutError, Oper8FatalError)):\n status_update = {\n \"ready_reason\": status.ReadyReason.ERRORED,\n \"ready_message\": str(error),\n \"updating_reason\": status.UpdatingReason.ERRORED,\n \"updating_message\": str(error),\n \"component_state\": component_state,\n }\n\n # Catchall for non oper8 errors\n else:\n status_update = {\n \"ready_reason\": status.ReadyReason.ERRORED,\n \"ready_message\": str(error),\n \"updating_reason\": status.UpdatingReason.ERRORED,\n \"updating_message\": str(error),\n }\n\n return self._update_resource_status(\n deploy_manager, cr_manifest, **status_update\n )" }, { "identifier": "READY_CONDITION", "path": "oper8/status.py", "snippet": "READY_CONDITION = \"Ready\"" }, { "identifier": "get_condition", "path": "oper8/status.py", "snippet": "def get_condition(type_name: str, current_status: dict) -> dict:\n \"\"\"Extract the given condition type from a status object\n\n Args:\n type: str\n The condition type to fetch\n current_status: dict\n The dict representation of the status for a given application\n\n Returns:\n condition: dict\n The condition object if found, empty dict otherwise\n \"\"\"\n cond = [\n cond\n for cond in current_status.get(\"conditions\", [])\n if cond.get(\"type\") == type_name\n ]\n if cond:\n assert len(cond) == 1, f\"Found multiple condition entries for {type_name}\"\n return cond[0]\n return {}" }, { "identifier": "abstractclassproperty", "path": "oper8/utils.py", "snippet": "class abstractclassproperty: # pylint: disable=invalid-name,too-few-public-methods\n \"\"\"This decorator implements a classproperty that will raise when accessed\"\"\"\n\n def __init__(self, func):\n self.prop_name = func.__name__\n\n def __get__(self, *args):\n # If this is being called by __setattr__, we're ok because it's\n # apptempting to set the attribute on the class\n curframe = inspect.currentframe()\n callframe = inspect.getouterframes(curframe, 2)[1]\n caller_name = callframe[3]\n if caller_name == \"__setattr__\":\n return None\n\n # If this is a help() call or a pdoc documentation 
request, return an\n # object with a docstring indicating that the property is abstract\n if (\n \"help\" in callframe.frame.f_code.co_names\n or callframe.frame.f_globals[\"__name__\"] == \"pdoc\"\n ):\n\n class AbstractClassProperty: # pylint: disable=missing-class-docstring\n __slots__ = []\n __doc__ = f\"\"\"The <{self.prop_name}> property is an abstract class property\n that must be overwritten in derived children\n \"\"\"\n\n return AbstractClassProperty\n\n raise NotImplementedError(\n f\"Cannot access abstractclassproperty {self.prop_name}\"\n )" }, { "identifier": "obj_to_hash", "path": "oper8/watch_manager/python_watch_manager/utils/common.py", "snippet": "def obj_to_hash(obj: Any) -> str:\n \"\"\"Get the hash of any jsonable python object\n\n Args:\n obj: Any\n The object to hash\n\n Returns:\n hash: str\n The hash of obj\n \"\"\"\n return hash(json.dumps(obj, sort_keys=True))" }, { "identifier": "RESERVED_PLATFORM_ANNOTATIONS", "path": "oper8/watch_manager/python_watch_manager/utils/constants.py", "snippet": "RESERVED_PLATFORM_ANNOTATIONS = [\n \"k8s.io\",\n \"kubernetes.io\",\n \"openshift.io\",\n]" }, { "identifier": "RESOURCE_VERSION_KEEP_COUNT", "path": "oper8/watch_manager/python_watch_manager/utils/constants.py", "snippet": "RESOURCE_VERSION_KEEP_COUNT = 20" } ]
from abc import ABC, abstractmethod
from collections import deque
from typing import Optional

from ....deploy_manager import KubeEventType
from ....managed_object import ManagedObject
from ....reconcile import ReconcileManager
from ....status import READY_CONDITION, get_condition
from ....utils import abstractclassproperty
from ..utils import (
    RESERVED_PLATFORM_ANNOTATIONS,
    RESOURCE_VERSION_KEEP_COUNT,
    obj_to_hash,
)
import alog
11,921
"""Filter for duplicate resource versions which happens when restarting a watch connection""" def __init__(self, resource: ManagedObject): """Initialize the resource version list""" # Use a dequeue instead of a list/set to set a bound on the number # of tracked versions self.resource_versions = deque([], maxlen=RESOURCE_VERSION_KEEP_COUNT) super().__init__(resource) def test( # pylint: disable=inconsistent-return-statements self, resource: ManagedObject, event: KubeEventType, ) -> Optional[bool]: """Test if the resource's resourceVersion has been seen before""" # Don't skip add events as the kubernetes watch can duplicate events if event == KubeEventType.DELETED: return return resource.resource_version not in self.resource_versions def update(self, resource: ManagedObject): """Add the resources ResourceVersion to the list""" self.resource_versions.append(resource.resource_version) ### Annotation Filters class AnnotationFilter(Filter): """Filter resources to reconcile on annotation changes""" def __init__(self, resource: ManagedObject): """Initialize the annotation hash variable""" self.annotations = None super().__init__(resource) def test( # pylint: disable=inconsistent-return-statements self, resource: ManagedObject, event: KubeEventType, ) -> Optional[bool]: """Test if a resource's annotation has changed""" # Ignore Added and deleted events if event in [KubeEventType.ADDED, KubeEventType.DELETED]: return return self.annotations != self.get_annotation_hash(resource) def update(self, resource: ManagedObject): """Update the currently stored annotation""" self.annotations = self.get_annotation_hash(resource) def get_annotation_hash(self, resource: ManagedObject) -> str: """Helper function to get the annotation hash""" return obj_to_hash(resource.metadata.get("annotations", {})) class UserAnnotationFilter(AnnotationFilter): """Filter resources to reconcile on user annotation changes. This excludes kubernetes and openshift annotations """ def get_annotation_hash(self, resource: ManagedObject) -> str: """Overriden function to exclude common platform annotations from the annotation hash""" output_annotations = {} for key, value in resource.metadata.get("annotations", {}).items(): if self.contains_platform_key(key): continue output_annotations[key] = value return obj_to_hash(output_annotations) def contains_platform_key(self, key: str) -> bool: """Helper to check if the key contains one of the platform annotations""" return any( reserved_key in key for reserved_key in RESERVED_PLATFORM_ANNOTATIONS ) ### Oper8 Filters class PauseFilter(Filter): """This filter skips resources that have the oper8 pause annotation""" def test(self, resource: ManagedObject, event: KubeEventType) -> Optional[bool]: """Test if a resource has the pause annotation""" return not ReconcileManager._is_paused( # pylint: disable=protected-access resource ) class SubsystemStatusFilter(Filter): """Reconcile oper8 controllers when their oper8 status changes EXPERIMENTAL: This has passed basic validation but has not been rigorously tested in the field """ def __init__(self, resource: ManagedObject): """Initialize the currently observed ready condition""" self.ready_condition = None super().__init__(resource) def test( # pylint: disable=inconsistent-return-statements self, resource: ManagedObject, event: KubeEventType, ) -> Optional[bool]: """Test if a resources subsystem condition has changed""" if event in [KubeEventType.ADDED, KubeEventType.DELETED]: return return self.ready_condition != get_condition(
""" Filters are used to limit the amount of events being reconciled by a watch manager This is based off of the kubernetes controller runtime's "predicates": https://pkg.go.dev/sigs.k8s.io/[email protected]/pkg/predicate#Funcs The default set of filters is derived from operator-sdk's ansible predicates https://github.com/operator-framework/operator-sdk/blob/50c6ac03746ff4edf582feb9a71d2a7ea6ae6c40/internal/ansible/controller/controller.go#L105 """ # Standard # First Party # Local log = alog.use_channel("PWMFLT") ## Default Types class Filter(ABC): """Generic Filter Interface for subclassing. Every subclass should implement a `test` function which returns true when a resource should be reconciled. Subclasses can optionally implement a `update` method if the filter requires storing some stateful information like ResourceVersion or Metadata. NOTE: A unique Filter instance is created for each resource """ def __init__(self, resource: ManagedObject): # noqa: B027 """Initializer can be used to detect configuration or create instance variables. Even though a resource is provided it should not set state until update is called Args: resource: ManagedObject This resource can be used by subclass to gather generic information. """ ## Abstract Interface ###################################################### # # These functions must be implemented by child classes ## @abstractmethod def test(self, resource: ManagedObject, event: KubeEventType) -> Optional[bool]: """Test whether the resource&event passes the filter. Returns true if the filter should be reconciled and return false if it should not be. A filter can optionally return None to ignore an event Args: resource: ManagedObject The current resource being checked event: KubeEventType The event type that triggered this filter Returns: result: Optional[bool] The result of the test. """ ## Base Class Interface #################################################### # # These methods MAY be implemented by children, but contain default # implementations that are appropriate for simple cases. # ## def update(self, resource: ManagedObject): # noqa: B027 """Update the instances current state. 
Args: resource: ManagedObject The current state of the resource """ def update_and_test(self, resource: ManagedObject, event: KubeEventType) -> bool: """First test a resource/event against a filter then update the current state Args: resource: ManagedObject The resource being filtered event: KubeEventType The event to be filtered Returns: test_result: bool The test result """ result = self.test(resource, event) if result is not None and not result: log.debug3( "Failed filter: %s with return val %s", self, result, extra={"resource": resource}, ) self.update(resource) return result ## Generic Resource filters class CreationDeletionFilter(Filter): """Filter to ensure reconciliation on creation and deletion events""" def test( # pylint: disable=inconsistent-return-statements self, resource: ManagedObject, event: KubeEventType, ) -> Optional[bool]: """Return true if event is ADDED or DELETED""" # Ignore non Added/Deleted Events if event not in [KubeEventType.ADDED, KubeEventType.DELETED]: return return True class GenerationFilter(Filter): """Filter for reconciling on generation changes for resources that support it""" def __init__(self, resource: ManagedObject): """Set generation instance variable""" super().__init__(resource) self.generation = None def test( # pylint: disable=inconsistent-return-statements self, resource: ManagedObject, event: KubeEventType, ) -> Optional[bool]: """Return true if resource generation is different than before""" # Only update&test resources with a generation if not self.generation: return # Only test on resource updates if event in [KubeEventType.ADDED, KubeEventType.DELETED]: return # Test if new generation is different return self.generation != resource.metadata.get("generation") def update(self, resource: ManagedObject): """Update the currently observed generation""" self.generation = resource.metadata.get("generation") class NoGenerationFilter(Filter): """Filter for reconciling changes to spec on resources that don't support the generation field like pods. 
It does this by hashing the objects excluding status and metadata""" def __init__(self, resource: ManagedObject): """Check if resource supports generation and initialize the hash dict""" self.supports_generation = resource.metadata.get("generation") is not None self.resource_hashes = {} super().__init__(resource) def test( # pylint: disable=inconsistent-return-statements self, resource: ManagedObject, event: KubeEventType, ) -> Optional[bool]: """Return True if a resources current hash differs from the current""" # Don't test resources that support generation or if we don't have hashes yet if self.supports_generation or not self.resource_hashes: return # Only test on resource updates if event in [KubeEventType.ADDED, KubeEventType.DELETED]: return # Check each stored resource hash to see if its # changed for key, obj_has in self.resource_hashes.items(): if obj_has != obj_to_hash(resource.get(key)): log.debug2("Detected change in %s", key) return True return False def update(self, resource: ManagedObject): """Update the observed spec hashes""" if self.supports_generation: return # Get the default hashes for all object keys except metadata # and status for key, obj in resource.definition.items(): if key in ["metadata", "status", "kind", "apiVersion"]: continue self.resource_hashes[key] = obj_to_hash(obj) class ResourceVersionFilter(Filter): """Filter for duplicate resource versions which happens when restarting a watch connection""" def __init__(self, resource: ManagedObject): """Initialize the resource version list""" # Use a dequeue instead of a list/set to set a bound on the number # of tracked versions self.resource_versions = deque([], maxlen=RESOURCE_VERSION_KEEP_COUNT) super().__init__(resource) def test( # pylint: disable=inconsistent-return-statements self, resource: ManagedObject, event: KubeEventType, ) -> Optional[bool]: """Test if the resource's resourceVersion has been seen before""" # Don't skip add events as the kubernetes watch can duplicate events if event == KubeEventType.DELETED: return return resource.resource_version not in self.resource_versions def update(self, resource: ManagedObject): """Add the resources ResourceVersion to the list""" self.resource_versions.append(resource.resource_version) ### Annotation Filters class AnnotationFilter(Filter): """Filter resources to reconcile on annotation changes""" def __init__(self, resource: ManagedObject): """Initialize the annotation hash variable""" self.annotations = None super().__init__(resource) def test( # pylint: disable=inconsistent-return-statements self, resource: ManagedObject, event: KubeEventType, ) -> Optional[bool]: """Test if a resource's annotation has changed""" # Ignore Added and deleted events if event in [KubeEventType.ADDED, KubeEventType.DELETED]: return return self.annotations != self.get_annotation_hash(resource) def update(self, resource: ManagedObject): """Update the currently stored annotation""" self.annotations = self.get_annotation_hash(resource) def get_annotation_hash(self, resource: ManagedObject) -> str: """Helper function to get the annotation hash""" return obj_to_hash(resource.metadata.get("annotations", {})) class UserAnnotationFilter(AnnotationFilter): """Filter resources to reconcile on user annotation changes. 
This excludes kubernetes and openshift annotations """ def get_annotation_hash(self, resource: ManagedObject) -> str: """Overriden function to exclude common platform annotations from the annotation hash""" output_annotations = {} for key, value in resource.metadata.get("annotations", {}).items(): if self.contains_platform_key(key): continue output_annotations[key] = value return obj_to_hash(output_annotations) def contains_platform_key(self, key: str) -> bool: """Helper to check if the key contains one of the platform annotations""" return any( reserved_key in key for reserved_key in RESERVED_PLATFORM_ANNOTATIONS ) ### Oper8 Filters class PauseFilter(Filter): """This filter skips resources that have the oper8 pause annotation""" def test(self, resource: ManagedObject, event: KubeEventType) -> Optional[bool]: """Test if a resource has the pause annotation""" return not ReconcileManager._is_paused( # pylint: disable=protected-access resource ) class SubsystemStatusFilter(Filter): """Reconcile oper8 controllers when their oper8 status changes EXPERIMENTAL: This has passed basic validation but has not been rigorously tested in the field """ def __init__(self, resource: ManagedObject): """Initialize the currently observed ready condition""" self.ready_condition = None super().__init__(resource) def test( # pylint: disable=inconsistent-return-statements self, resource: ManagedObject, event: KubeEventType, ) -> Optional[bool]: """Test if a resources subsystem condition has changed""" if event in [KubeEventType.ADDED, KubeEventType.DELETED]: return return self.ready_condition != get_condition(
READY_CONDITION, resource.get("status", {})
3
2023-11-15 16:43:29+00:00
16k
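The oper8 record above documents the `Filter` contract: subclasses implement `test` (and optionally `update`), and callers drive them through `update_and_test`. As a minimal illustrative sketch of that contract, the snippet below defines a hypothetical `LabelFilter`; the class name, label key, and `Foo` manifest are invented for illustration, and the absolute import paths are inferred from the file paths quoted in the record.

# Minimal sketch of the Filter contract shown in the oper8 record above.
# LabelFilter, the label key, and the Foo manifest are illustrative only;
# the import paths are inferred from the file paths quoted in the record.
from typing import Optional

from oper8.deploy_manager import KubeEventType
from oper8.managed_object import ManagedObject
from oper8.watch_manager.python_watch_manager.filters.filters import Filter


class LabelFilter(Filter):
    """Reconcile only resources that carry an explicit opt-in label."""

    def test(self, resource: ManagedObject, event: KubeEventType) -> Optional[bool]:
        # True lets the event through to reconciliation, False skips it
        labels = resource.metadata.get("labels", {})
        return labels.get("example.org/reconcile") == "true"


resource = ManagedObject(
    {
        "kind": "Foo",
        "apiVersion": "example.org/v1",
        "metadata": {
            "name": "my-foo",
            "namespace": "default",
            "labels": {"example.org/reconcile": "true"},
        },
    }
)

flt = LabelFilter(resource)
# update_and_test() runs test() and then gives the filter a chance to record state
print(flt.update_and_test(resource, KubeEventType.ADDED))  # -> True

In practice such a filter would be combined with the stock predicates shown in the record (CreationDeletionFilter, GenerationFilter, ResourceVersionFilter, and so on) rather than used on its own.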
Jisencc/yolov5_dual_weighting
segment/predict.py
[ { "identifier": "DetectMultiBackend", "path": "models/common.py", "snippet": "class DetectMultiBackend(nn.Module):\n # YOLOv5 MultiBackend class for python inference on various backends\n def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, data=None, fp16=False, fuse=True):\n # Usage:\n # PyTorch: weights = *.pt\n # TorchScript: *.torchscript\n # ONNX Runtime: *.onnx\n # ONNX OpenCV DNN: *.onnx --dnn\n # OpenVINO: *_openvino_model\n # CoreML: *.mlmodel\n # TensorRT: *.engine\n # TensorFlow SavedModel: *_saved_model\n # TensorFlow GraphDef: *.pb\n # TensorFlow Lite: *.tflite\n # TensorFlow Edge TPU: *_edgetpu.tflite\n # PaddlePaddle: *_paddle_model\n from models.experimental import attempt_download, attempt_load # scoped to avoid circular import\n\n super().__init__()\n w = str(weights[0] if isinstance(weights, list) else weights)\n pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, triton = self._model_type(w)\n fp16 &= pt or jit or onnx or engine or triton # FP16\n nhwc = coreml or saved_model or pb or tflite or edgetpu # BHWC formats (vs torch BCWH)\n stride = 32 # default stride\n cuda = torch.cuda.is_available() and device.type != 'cpu' # use CUDA\n if not (pt or triton):\n w = attempt_download(w) # download if not local\n\n if pt: # PyTorch\n model = attempt_load(weights if isinstance(weights, list) else w, device=device, inplace=True, fuse=fuse)\n stride = max(int(model.stride.max()), 32) # model stride\n names = model.module.names if hasattr(model, 'module') else model.names # get class names\n model.half() if fp16 else model.float()\n self.model = model # explicitly assign for to(), cpu(), cuda(), half()\n elif jit: # TorchScript\n LOGGER.info(f'Loading {w} for TorchScript inference...')\n extra_files = {'config.txt': ''} # model metadata\n model = torch.jit.load(w, _extra_files=extra_files, map_location=device)\n model.half() if fp16 else model.float()\n if extra_files['config.txt']: # load metadata dict\n d = json.loads(extra_files['config.txt'],\n object_hook=lambda d: {\n int(k) if k.isdigit() else k: v\n for k, v in d.items()})\n stride, names = int(d['stride']), d['names']\n elif dnn: # ONNX OpenCV DNN\n LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...')\n check_requirements('opencv-python>=4.5.4')\n net = cv2.dnn.readNetFromONNX(w)\n elif onnx: # ONNX Runtime\n LOGGER.info(f'Loading {w} for ONNX Runtime inference...')\n check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime'))\n import onnxruntime\n providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']\n session = onnxruntime.InferenceSession(w, providers=providers)\n output_names = [x.name for x in session.get_outputs()]\n meta = session.get_modelmeta().custom_metadata_map # metadata\n if 'stride' in meta:\n stride, names = int(meta['stride']), eval(meta['names'])\n elif xml: # OpenVINO\n LOGGER.info(f'Loading {w} for OpenVINO inference...')\n check_requirements('openvino>=2023.0') # requires openvino-dev: https://pypi.org/project/openvino-dev/\n from openvino.runtime import Core, Layout, get_batch\n core = Core()\n if not Path(w).is_file(): # if not *.xml\n w = next(Path(w).glob('*.xml')) # get *.xml file from *_openvino_model dir\n ov_model = core.read_model(model=w, weights=Path(w).with_suffix('.bin'))\n if ov_model.get_parameters()[0].get_layout().empty:\n ov_model.get_parameters()[0].set_layout(Layout('NCHW'))\n batch_dim = get_batch(ov_model)\n if batch_dim.is_static:\n batch_size = 
batch_dim.get_length()\n ov_compiled_model = core.compile_model(ov_model, device_name='AUTO') # AUTO selects best available device\n stride, names = self._load_metadata(Path(w).with_suffix('.yaml')) # load metadata\n elif engine: # TensorRT\n LOGGER.info(f'Loading {w} for TensorRT inference...')\n import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download\n check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=7.0.0\n if device.type == 'cpu':\n device = torch.device('cuda:0')\n Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr'))\n logger = trt.Logger(trt.Logger.INFO)\n with open(w, 'rb') as f, trt.Runtime(logger) as runtime:\n model = runtime.deserialize_cuda_engine(f.read())\n context = model.create_execution_context()\n bindings = OrderedDict()\n output_names = []\n fp16 = False # default updated below\n dynamic = False\n for i in range(model.num_bindings):\n name = model.get_binding_name(i)\n dtype = trt.nptype(model.get_binding_dtype(i))\n if model.binding_is_input(i):\n if -1 in tuple(model.get_binding_shape(i)): # dynamic\n dynamic = True\n context.set_binding_shape(i, tuple(model.get_profile_shape(0, i)[2]))\n if dtype == np.float16:\n fp16 = True\n else: # output\n output_names.append(name)\n shape = tuple(context.get_binding_shape(i))\n im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device)\n bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr()))\n binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items())\n batch_size = bindings['images'].shape[0] # if dynamic, this is instead max batch size\n elif coreml: # CoreML\n LOGGER.info(f'Loading {w} for CoreML inference...')\n import coremltools as ct\n model = ct.models.MLModel(w)\n elif saved_model: # TF SavedModel\n LOGGER.info(f'Loading {w} for TensorFlow SavedModel inference...')\n import tensorflow as tf\n keras = False # assume TF1 saved_model\n model = tf.keras.models.load_model(w) if keras else tf.saved_model.load(w)\n elif pb: # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt\n LOGGER.info(f'Loading {w} for TensorFlow GraphDef inference...')\n import tensorflow as tf\n\n def wrap_frozen_graph(gd, inputs, outputs):\n x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=''), []) # wrapped\n ge = x.graph.as_graph_element\n return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs))\n\n def gd_outputs(gd):\n name_list, input_list = [], []\n for node in gd.node: # tensorflow.core.framework.node_def_pb2.NodeDef\n name_list.append(node.name)\n input_list.extend(node.input)\n return sorted(f'{x}:0' for x in list(set(name_list) - set(input_list)) if not x.startswith('NoOp'))\n\n gd = tf.Graph().as_graph_def() # TF GraphDef\n with open(w, 'rb') as f:\n gd.ParseFromString(f.read())\n frozen_func = wrap_frozen_graph(gd, inputs='x:0', outputs=gd_outputs(gd))\n elif tflite or edgetpu: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python\n try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu\n from tflite_runtime.interpreter import Interpreter, load_delegate\n except ImportError:\n import tensorflow as tf\n Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate,\n if edgetpu: # TF Edge TPU https://coral.ai/software/#edgetpu-runtime\n LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...')\n delegate = {\n 'Linux': 'libedgetpu.so.1',\n 'Darwin': 
'libedgetpu.1.dylib',\n 'Windows': 'edgetpu.dll'}[platform.system()]\n interpreter = Interpreter(model_path=w, experimental_delegates=[load_delegate(delegate)])\n else: # TFLite\n LOGGER.info(f'Loading {w} for TensorFlow Lite inference...')\n interpreter = Interpreter(model_path=w) # load TFLite model\n interpreter.allocate_tensors() # allocate\n input_details = interpreter.get_input_details() # inputs\n output_details = interpreter.get_output_details() # outputs\n # load metadata\n with contextlib.suppress(zipfile.BadZipFile):\n with zipfile.ZipFile(w, 'r') as model:\n meta_file = model.namelist()[0]\n meta = ast.literal_eval(model.read(meta_file).decode('utf-8'))\n stride, names = int(meta['stride']), meta['names']\n elif tfjs: # TF.js\n raise NotImplementedError('ERROR: YOLOv5 TF.js inference is not supported')\n elif paddle: # PaddlePaddle\n LOGGER.info(f'Loading {w} for PaddlePaddle inference...')\n check_requirements('paddlepaddle-gpu' if cuda else 'paddlepaddle')\n import paddle.inference as pdi\n if not Path(w).is_file(): # if not *.pdmodel\n w = next(Path(w).rglob('*.pdmodel')) # get *.pdmodel file from *_paddle_model dir\n weights = Path(w).with_suffix('.pdiparams')\n config = pdi.Config(str(w), str(weights))\n if cuda:\n config.enable_use_gpu(memory_pool_init_size_mb=2048, device_id=0)\n predictor = pdi.create_predictor(config)\n input_handle = predictor.get_input_handle(predictor.get_input_names()[0])\n output_names = predictor.get_output_names()\n elif triton: # NVIDIA Triton Inference Server\n LOGGER.info(f'Using {w} as Triton Inference Server...')\n check_requirements('tritonclient[all]')\n from utils.triton import TritonRemoteModel\n model = TritonRemoteModel(url=w)\n nhwc = model.runtime.startswith('tensorflow')\n else:\n raise NotImplementedError(f'ERROR: {w} is not a supported format')\n\n # class names\n if 'names' not in locals():\n names = yaml_load(data)['names'] if data else {i: f'class{i}' for i in range(999)}\n if names[0] == 'n01440764' and len(names) == 1000: # ImageNet\n names = yaml_load(ROOT / 'data/ImageNet.yaml')['names'] # human-readable names\n\n self.__dict__.update(locals()) # assign all variables to self\n\n def forward(self, im, augment=False, visualize=False):\n # YOLOv5 MultiBackend inference\n b, ch, h, w = im.shape # batch, channel, height, width\n if self.fp16 and im.dtype != torch.float16:\n im = im.half() # to FP16\n if self.nhwc:\n im = im.permute(0, 2, 3, 1) # torch BCHW to numpy BHWC shape(1,320,192,3)\n\n if self.pt: # PyTorch\n y = self.model(im, augment=augment, visualize=visualize) if augment or visualize else self.model(im)\n elif self.jit: # TorchScript\n y = self.model(im)\n elif self.dnn: # ONNX OpenCV DNN\n im = im.cpu().numpy() # torch to numpy\n self.net.setInput(im)\n y = self.net.forward()\n elif self.onnx: # ONNX Runtime\n im = im.cpu().numpy() # torch to numpy\n y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im})\n elif self.xml: # OpenVINO\n im = im.cpu().numpy() # FP32\n y = list(self.ov_compiled_model(im).values())\n elif self.engine: # TensorRT\n if self.dynamic and im.shape != self.bindings['images'].shape:\n i = self.model.get_binding_index('images')\n self.context.set_binding_shape(i, im.shape) # reshape if dynamic\n self.bindings['images'] = self.bindings['images']._replace(shape=im.shape)\n for name in self.output_names:\n i = self.model.get_binding_index(name)\n self.bindings[name].data.resize_(tuple(self.context.get_binding_shape(i)))\n s = self.bindings['images'].shape\n assert im.shape 
== s, f\"input size {im.shape} {'>' if self.dynamic else 'not equal to'} max model size {s}\"\n self.binding_addrs['images'] = int(im.data_ptr())\n self.context.execute_v2(list(self.binding_addrs.values()))\n y = [self.bindings[x].data for x in sorted(self.output_names)]\n elif self.coreml: # CoreML\n im = im.cpu().numpy()\n im = Image.fromarray((im[0] * 255).astype('uint8'))\n # im = im.resize((192, 320), Image.BILINEAR)\n y = self.model.predict({'image': im}) # coordinates are xywh normalized\n if 'confidence' in y:\n box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]]) # xyxy pixels\n conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float)\n y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1)\n else:\n y = list(reversed(y.values())) # reversed for segmentation models (pred, proto)\n elif self.paddle: # PaddlePaddle\n im = im.cpu().numpy().astype(np.float32)\n self.input_handle.copy_from_cpu(im)\n self.predictor.run()\n y = [self.predictor.get_output_handle(x).copy_to_cpu() for x in self.output_names]\n elif self.triton: # NVIDIA Triton Inference Server\n y = self.model(im)\n else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU)\n im = im.cpu().numpy()\n if self.saved_model: # SavedModel\n y = self.model(im, training=False) if self.keras else self.model(im)\n elif self.pb: # GraphDef\n y = self.frozen_func(x=self.tf.constant(im))\n else: # Lite or Edge TPU\n input = self.input_details[0]\n int8 = input['dtype'] == np.uint8 # is TFLite quantized uint8 model\n if int8:\n scale, zero_point = input['quantization']\n im = (im / scale + zero_point).astype(np.uint8) # de-scale\n self.interpreter.set_tensor(input['index'], im)\n self.interpreter.invoke()\n y = []\n for output in self.output_details:\n x = self.interpreter.get_tensor(output['index'])\n if int8:\n scale, zero_point = output['quantization']\n x = (x.astype(np.float32) - zero_point) * scale # re-scale\n y.append(x)\n y = [x if isinstance(x, np.ndarray) else x.numpy() for x in y]\n y[0][..., :4] *= [w, h, w, h] # xywh normalized to pixels\n\n if isinstance(y, (list, tuple)):\n return self.from_numpy(y[0]) if len(y) == 1 else [self.from_numpy(x) for x in y]\n else:\n return self.from_numpy(y)\n\n def from_numpy(self, x):\n return torch.from_numpy(x).to(self.device) if isinstance(x, np.ndarray) else x\n\n def warmup(self, imgsz=(1, 3, 640, 640)):\n # Warmup model by running inference once\n warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb, self.triton\n if any(warmup_types) and (self.device.type != 'cpu' or self.triton):\n im = torch.empty(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device) # input\n for _ in range(2 if self.jit else 1): #\n self.forward(im) # warmup\n\n @staticmethod\n def _model_type(p='path/to/model.pt'):\n # Return model type from model path, i.e. 
path='path/to/model.onnx' -> type=onnx\n # types = [pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle]\n from export import export_formats\n from utils.downloads import is_url\n sf = list(export_formats().Suffix) # export suffixes\n if not is_url(p, check=False):\n check_suffix(p, sf) # checks\n url = urlparse(p) # if url may be Triton inference server\n types = [s in Path(p).name for s in sf]\n types[8] &= not types[9] # tflite &= not edgetpu\n triton = not any(types) and all([any(s in url.scheme for s in ['http', 'grpc']), url.netloc])\n return types + [triton]\n\n @staticmethod\n def _load_metadata(f=Path('path/to/meta.yaml')):\n # Load metadata from meta.yaml if it exists\n if f.exists():\n d = yaml_load(f)\n return d['stride'], d['names'] # assign stride, names\n return None, None" }, { "identifier": "IMG_FORMATS", "path": "utils/dataloaders.py", "snippet": "IMG_FORMATS = 'bmp', 'dng', 'jpeg', 'jpg', 'mpo', 'png', 'tif', 'tiff', 'webp', 'pfm' # include image suffixes" }, { "identifier": "VID_FORMATS", "path": "utils/dataloaders.py", "snippet": "VID_FORMATS = 'asf', 'avi', 'gif', 'm4v', 'mkv', 'mov', 'mp4', 'mpeg', 'mpg', 'ts', 'wmv' # include video suffixes" }, { "identifier": "LoadImages", "path": "utils/dataloaders.py", "snippet": "class LoadImages:\n # YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4`\n def __init__(self, path, img_size=640, stride=32, auto=True, transforms=None, vid_stride=1):\n if isinstance(path, str) and Path(path).suffix == '.txt': # *.txt file with img/vid/dir on each line\n path = Path(path).read_text().rsplit()\n files = []\n for p in sorted(path) if isinstance(path, (list, tuple)) else [path]:\n p = str(Path(p).resolve())\n if '*' in p:\n files.extend(sorted(glob.glob(p, recursive=True))) # glob\n elif os.path.isdir(p):\n files.extend(sorted(glob.glob(os.path.join(p, '*.*')))) # dir\n elif os.path.isfile(p):\n files.append(p) # files\n else:\n raise FileNotFoundError(f'{p} does not exist')\n\n images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS]\n videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS]\n ni, nv = len(images), len(videos)\n\n self.img_size = img_size\n self.stride = stride\n self.files = images + videos\n self.nf = ni + nv # number of files\n self.video_flag = [False] * ni + [True] * nv\n self.mode = 'image'\n self.auto = auto\n self.transforms = transforms # optional\n self.vid_stride = vid_stride # video frame-rate stride\n if any(videos):\n self._new_video(videos[0]) # new video\n else:\n self.cap = None\n assert self.nf > 0, f'No images or videos found in {p}. 
' \\\n f'Supported formats are:\\nimages: {IMG_FORMATS}\\nvideos: {VID_FORMATS}'\n\n def __iter__(self):\n self.count = 0\n return self\n\n def __next__(self):\n if self.count == self.nf:\n raise StopIteration\n path = self.files[self.count]\n\n if self.video_flag[self.count]:\n # Read video\n self.mode = 'video'\n for _ in range(self.vid_stride):\n self.cap.grab()\n ret_val, im0 = self.cap.retrieve()\n while not ret_val:\n self.count += 1\n self.cap.release()\n if self.count == self.nf: # last video\n raise StopIteration\n path = self.files[self.count]\n self._new_video(path)\n ret_val, im0 = self.cap.read()\n\n self.frame += 1\n # im0 = self._cv2_rotate(im0) # for use if cv2 autorotation is False\n s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: '\n\n else:\n # Read image\n self.count += 1\n im0 = cv2.imread(path) # BGR\n assert im0 is not None, f'Image Not Found {path}'\n s = f'image {self.count}/{self.nf} {path}: '\n\n if self.transforms:\n im = self.transforms(im0) # transforms\n else:\n im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize\n im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB\n im = np.ascontiguousarray(im) # contiguous\n\n return path, im, im0, self.cap, s\n\n def _new_video(self, path):\n # Create a new video capture object\n self.frame = 0\n self.cap = cv2.VideoCapture(path)\n self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT) / self.vid_stride)\n self.orientation = int(self.cap.get(cv2.CAP_PROP_ORIENTATION_META)) # rotation degrees\n # self.cap.set(cv2.CAP_PROP_ORIENTATION_AUTO, 0) # disable https://github.com/ultralytics/yolov5/issues/8493\n\n def _cv2_rotate(self, im):\n # Rotate a cv2 video manually\n if self.orientation == 0:\n return cv2.rotate(im, cv2.ROTATE_90_CLOCKWISE)\n elif self.orientation == 180:\n return cv2.rotate(im, cv2.ROTATE_90_COUNTERCLOCKWISE)\n elif self.orientation == 90:\n return cv2.rotate(im, cv2.ROTATE_180)\n return im\n\n def __len__(self):\n return self.nf # number of files" }, { "identifier": "LoadScreenshots", "path": "utils/dataloaders.py", "snippet": "class LoadScreenshots:\n # YOLOv5 screenshot dataloader, i.e. 
`python detect.py --source \"screen 0 100 100 512 256\"`\n def __init__(self, source, img_size=640, stride=32, auto=True, transforms=None):\n # source = [screen_number left top width height] (pixels)\n check_requirements('mss')\n import mss\n\n source, *params = source.split()\n self.screen, left, top, width, height = 0, None, None, None, None # default to full screen 0\n if len(params) == 1:\n self.screen = int(params[0])\n elif len(params) == 4:\n left, top, width, height = (int(x) for x in params)\n elif len(params) == 5:\n self.screen, left, top, width, height = (int(x) for x in params)\n self.img_size = img_size\n self.stride = stride\n self.transforms = transforms\n self.auto = auto\n self.mode = 'stream'\n self.frame = 0\n self.sct = mss.mss()\n\n # Parse monitor shape\n monitor = self.sct.monitors[self.screen]\n self.top = monitor['top'] if top is None else (monitor['top'] + top)\n self.left = monitor['left'] if left is None else (monitor['left'] + left)\n self.width = width or monitor['width']\n self.height = height or monitor['height']\n self.monitor = {'left': self.left, 'top': self.top, 'width': self.width, 'height': self.height}\n\n def __iter__(self):\n return self\n\n def __next__(self):\n # mss screen capture: get raw pixels from the screen as np array\n im0 = np.array(self.sct.grab(self.monitor))[:, :, :3] # [:, :, :3] BGRA to BGR\n s = f'screen {self.screen} (LTWH): {self.left},{self.top},{self.width},{self.height}: '\n\n if self.transforms:\n im = self.transforms(im0) # transforms\n else:\n im = letterbox(im0, self.img_size, stride=self.stride, auto=self.auto)[0] # padded resize\n im = im.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB\n im = np.ascontiguousarray(im) # contiguous\n self.frame += 1\n return str(self.screen), im, im0, None, s # screen, img, original img, im0s, s" }, { "identifier": "LoadStreams", "path": "utils/dataloaders.py", "snippet": "class LoadStreams:\n # YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams`\n def __init__(self, sources='file.streams', img_size=640, stride=32, auto=True, transforms=None, vid_stride=1):\n torch.backends.cudnn.benchmark = True # faster for fixed-size inference\n self.mode = 'stream'\n self.img_size = img_size\n self.stride = stride\n self.vid_stride = vid_stride # video frame-rate stride\n sources = Path(sources).read_text().rsplit() if os.path.isfile(sources) else [sources]\n n = len(sources)\n self.sources = [clean_str(x) for x in sources] # clean source names for later\n self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n\n for i, s in enumerate(sources): # index, source\n # Start thread to read frames from video stream\n st = f'{i + 1}/{n}: {s}... '\n if urlparse(s).hostname in ('www.youtube.com', 'youtube.com', 'youtu.be'): # if source is YouTube video\n # YouTube format i.e. 'https://www.youtube.com/watch?v=Zgi9g1ksQHc' or 'https://youtu.be/LNwODJXcvt4'\n check_requirements(('pafy', 'youtube_dl==2020.12.2'))\n import pafy\n s = pafy.new(s).getbest(preftype='mp4').url # YouTube URL\n s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam\n if s == 0:\n assert not is_colab(), '--source 0 webcam unsupported on Colab. Rerun command in a local environment.'\n assert not is_kaggle(), '--source 0 webcam unsupported on Kaggle. 
Rerun command in a local environment.'\n cap = cv2.VideoCapture(s)\n assert cap.isOpened(), f'{st}Failed to open {s}'\n w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fps = cap.get(cv2.CAP_PROP_FPS) # warning: may return 0 or nan\n self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback\n self.fps[i] = max((fps if math.isfinite(fps) else 0) % 100, 0) or 30 # 30 FPS fallback\n\n _, self.imgs[i] = cap.read() # guarantee first frame\n self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True)\n LOGGER.info(f'{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)')\n self.threads[i].start()\n LOGGER.info('') # newline\n\n # check for common shapes\n s = np.stack([letterbox(x, img_size, stride=stride, auto=auto)[0].shape for x in self.imgs])\n self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal\n self.auto = auto and self.rect\n self.transforms = transforms # optional\n if not self.rect:\n LOGGER.warning('WARNING ⚠️ Stream shapes differ. For optimal performance supply similarly-shaped streams.')\n\n def update(self, i, cap, stream):\n # Read stream `i` frames in daemon thread\n n, f = 0, self.frames[i] # frame number, frame array\n while cap.isOpened() and n < f:\n n += 1\n cap.grab() # .read() = .grab() followed by .retrieve()\n if n % self.vid_stride == 0:\n success, im = cap.retrieve()\n if success:\n self.imgs[i] = im\n else:\n LOGGER.warning('WARNING ⚠️ Video stream unresponsive, please check your IP camera connection.')\n self.imgs[i] = np.zeros_like(self.imgs[i])\n cap.open(stream) # re-open stream if signal was lost\n time.sleep(0.0) # wait time\n\n def __iter__(self):\n self.count = -1\n return self\n\n def __next__(self):\n self.count += 1\n if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'): # q to quit\n cv2.destroyAllWindows()\n raise StopIteration\n\n im0 = self.imgs.copy()\n if self.transforms:\n im = np.stack([self.transforms(x) for x in im0]) # transforms\n else:\n im = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0] for x in im0]) # resize\n im = im[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW\n im = np.ascontiguousarray(im) # contiguous\n\n return self.sources, im, im0, None, ''\n\n def __len__(self):\n return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years" }, { "identifier": "LOGGER", "path": "utils/general.py", "snippet": "FILE = Path(__file__).resolve()\nROOT = FILE.parents[1] # YOLOv5 root directory\nRANK = int(os.getenv('RANK', -1))\nNUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads\nDATASETS_DIR = Path(os.getenv('YOLOv5_DATASETS_DIR', ROOT.parent / 'datasets')) # global datasets directory\nAUTOINSTALL = str(os.getenv('YOLOv5_AUTOINSTALL', True)).lower() == 'true' # global auto-install mode\nVERBOSE = str(os.getenv('YOLOv5_VERBOSE', True)).lower() == 'true' # global verbose mode\nTQDM_BAR_FORMAT = '{l_bar}{bar:10}{r_bar}' # tqdm bar format\nFONT = 'Arial.ttf' # https://ultralytics.com/assets/Arial.ttf\nLOGGING_NAME = 'yolov5'\nLOGGER = logging.getLogger(LOGGING_NAME) # define globally (used in train.py, val.py, detect.py, etc.)\nCONFIG_DIR = user_config_dir() # Ultralytics settings dir\ndef is_ascii(s=''):\ndef is_chinese(s='人工智能'):\ndef is_colab():\ndef is_jupyter():\ndef is_kaggle():\ndef is_docker() -> bool:\ndef is_writeable(dir, test=False):\ndef 
set_logging(name=LOGGING_NAME, verbose=True):\ndef user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'):\n def __init__(self, t=0.0):\n def __enter__(self):\n def __exit__(self, type, value, traceback):\n def time(self):\n def __init__(self, seconds, *, timeout_msg='', suppress_timeout_errors=True):\n def _timeout_handler(self, signum, frame):\n def __enter__(self):\n def __exit__(self, exc_type, exc_val, exc_tb):\n def __init__(self, new_dir):\n def __enter__(self):\n def __exit__(self, exc_type, exc_val, exc_tb):\ndef methods(instance):\ndef print_args(args: Optional[dict] = None, show_file=True, show_func=False):\ndef init_seeds(seed=0, deterministic=False):\ndef intersect_dicts(da, db, exclude=()):\ndef get_default_args(func):\ndef get_latest_run(search_dir='.'):\ndef file_age(path=__file__):\ndef file_date(path=__file__):\ndef file_size(path):\ndef check_online():\n def run_once():\ndef git_describe(path=ROOT): # path must be a directory\ndef check_git_status(repo='ultralytics/yolov5', branch='master'):\ndef check_git_info(path='.'):\ndef check_python(minimum='3.8.0'):\ndef check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False, verbose=False):\ndef check_img_size(imgsz, s=32, floor=0):\ndef check_imshow(warn=False):\ndef check_suffix(file='yolov5s.pt', suffix=('.pt', ), msg=''):\ndef check_yaml(file, suffix=('.yaml', '.yml')):\ndef check_file(file, suffix=''):\ndef check_font(font=FONT, progress=False):\ndef check_dataset(data, autodownload=True):\ndef check_amp(model):\n def amp_allclose(model, im):\ndef yaml_load(file='data.yaml'):\ndef yaml_save(file='data.yaml', data={}):\ndef unzip_file(file, path=None, exclude=('.DS_Store', '__MACOSX')):\ndef url2file(url):\ndef download(url, dir='.', unzip=True, delete=True, curl=False, threads=1, retry=3):\n def download_one(url, dir):\ndef make_divisible(x, divisor):\ndef clean_str(s):\ndef one_cycle(y1=0.0, y2=1.0, steps=100):\ndef colorstr(*input):\ndef labels_to_class_weights(labels, nc=80):\ndef labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):\ndef coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper)\ndef xyxy2xywh(x):\ndef xywh2xyxy(x):\ndef xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):\ndef xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0):\ndef xyn2xy(x, w=640, h=640, padw=0, padh=0):\ndef segment2box(segment, width=640, height=640):\ndef segments2boxes(segments):\ndef resample_segments(segments, n=1000):\ndef scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None):\ndef scale_segments(img1_shape, segments, img0_shape, ratio_pad=None, normalize=False):\ndef clip_boxes(boxes, shape):\ndef clip_segments(segments, shape):\ndef non_max_suppression(\n prediction,\n conf_thres=0.25,\n iou_thres=0.45,\n classes=None,\n agnostic=False,\n multi_label=False,\n labels=(),\n max_det=300,\n nm=0, # number of masks\n):\ndef strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer()\ndef print_mutation(keys, results, hyp, save_dir, bucket, prefix=colorstr('evolve: ')):\ndef apply_classifier(x, model, img, im0):\ndef increment_path(path, exist_ok=False, sep='', mkdir=False):\ndef imread(filename, flags=cv2.IMREAD_COLOR):\ndef imwrite(filename, img):\ndef imshow(path, im):\nclass Profile(contextlib.ContextDecorator):\nclass Timeout(contextlib.ContextDecorator):\nclass WorkingDirectory(contextlib.ContextDecorator):" }, { "identifier": "masks2segments", "path": "utils/segment/general.py", "snippet": "def masks2segments(masks, 
strategy='largest'):\n # Convert masks(n,160,160) into segments(n,xy)\n segments = []\n for x in masks.int().cpu().numpy().astype('uint8'):\n c = cv2.findContours(x, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[0]\n if c:\n if strategy == 'concat': # concatenate all segments\n c = np.concatenate([x.reshape(-1, 2) for x in c])\n elif strategy == 'largest': # select largest segment\n c = np.array(c[np.array([len(x) for x in c]).argmax()]).reshape(-1, 2)\n else:\n c = np.zeros((0, 2)) # no segments found\n segments.append(c.astype('float32'))\n return segments" }, { "identifier": "process_mask", "path": "utils/segment/general.py", "snippet": "def process_mask(protos, masks_in, bboxes, shape, upsample=False):\n \"\"\"\n Crop before upsample.\n proto_out: [mask_dim, mask_h, mask_w]\n out_masks: [n, mask_dim], n is number of masks after nms\n bboxes: [n, 4], n is number of masks after nms\n shape:input_image_size, (h, w)\n\n return: h, w, n\n \"\"\"\n\n c, mh, mw = protos.shape # CHW\n ih, iw = shape\n masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) # CHW\n\n downsampled_bboxes = bboxes.clone()\n downsampled_bboxes[:, 0] *= mw / iw\n downsampled_bboxes[:, 2] *= mw / iw\n downsampled_bboxes[:, 3] *= mh / ih\n downsampled_bboxes[:, 1] *= mh / ih\n\n masks = crop_mask(masks, downsampled_bboxes) # CHW\n if upsample:\n masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW\n return masks.gt_(0.5)" }, { "identifier": "process_mask_native", "path": "utils/segment/general.py", "snippet": "def process_mask_native(protos, masks_in, bboxes, shape):\n \"\"\"\n Crop after upsample.\n protos: [mask_dim, mask_h, mask_w]\n masks_in: [n, mask_dim], n is number of masks after nms\n bboxes: [n, 4], n is number of masks after nms\n shape: input_image_size, (h, w)\n\n return: h, w, n\n \"\"\"\n c, mh, mw = protos.shape # CHW\n masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw)\n gain = min(mh / shape[0], mw / shape[1]) # gain = old / new\n pad = (mw - shape[1] * gain) / 2, (mh - shape[0] * gain) / 2 # wh padding\n top, left = int(pad[1]), int(pad[0]) # y, x\n bottom, right = int(mh - pad[1]), int(mw - pad[0])\n masks = masks[:, top:bottom, left:right]\n\n masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW\n masks = crop_mask(masks, bboxes) # CHW\n return masks.gt_(0.5)" }, { "identifier": "select_device", "path": "utils/torch_utils.py", "snippet": "def select_device(device='', batch_size=0, newline=True):\n # device = None or 'cpu' or 0 or '0' or '0,1,2,3'\n s = f'YOLOv5 🚀 {git_describe() or file_date()} Python-{platform.python_version()} torch-{torch.__version__} '\n device = str(device).strip().lower().replace('cuda:', '').replace('none', '') # to string, 'cuda:0' to '0'\n cpu = device == 'cpu'\n mps = device == 'mps' # Apple Metal Performance Shaders (MPS)\n if cpu or mps:\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False\n elif device: # non-cpu device requested\n os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable - must be before assert is_available()\n assert torch.cuda.is_available() and torch.cuda.device_count() >= len(device.replace(',', '')), \\\n f\"Invalid CUDA '--device {device}' requested, use '--device cpu' or pass valid CUDA device(s)\"\n\n if not cpu and not mps and torch.cuda.is_available(): # prefer GPU if available\n devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e. 
0,1,6,7\n n = len(devices) # device count\n if n > 1 and batch_size > 0: # check batch_size is divisible by device_count\n assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'\n space = ' ' * (len(s) + 1)\n for i, d in enumerate(devices):\n p = torch.cuda.get_device_properties(i)\n s += f\"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / (1 << 20):.0f}MiB)\\n\" # bytes to MB\n arg = 'cuda:0'\n elif mps and getattr(torch, 'has_mps', False) and torch.backends.mps.is_available(): # prefer MPS if available\n s += 'MPS\\n'\n arg = 'mps'\n else: # revert to CPU\n s += 'CPU\\n'\n arg = 'cpu'\n\n if not newline:\n s = s.rstrip()\n LOGGER.info(s)\n return torch.device(arg)" }, { "identifier": "smart_inference_mode", "path": "utils/torch_utils.py", "snippet": "def smart_inference_mode(torch_1_9=check_version(torch.__version__, '1.9.0')):\n # Applies torch.inference_mode() decorator if torch>=1.9.0 else torch.no_grad() decorator\n def decorate(fn):\n return (torch.inference_mode if torch_1_9 else torch.no_grad)()(fn)\n\n return decorate" } ]
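The context snippets above close with smart_inference_mode, a decorator factory that applies torch.inference_mode() on torch>=1.9.0 and falls back to torch.no_grad() otherwise. The sketch below shows one way it is typically applied; it assumes the YOLOv5 repository root is on sys.path, and the function name run_once is illustrative rather than part of this record.

import torch
from utils.torch_utils import select_device, smart_inference_mode  # defined in the snippets above

device = select_device('')  # '' selects CUDA:0 when available, otherwise CPU

@smart_inference_mode()  # torch.inference_mode() on torch>=1.9.0, torch.no_grad() otherwise
def run_once(model, im):
    # Autograd state is never recorded inside this call, keeping inference memory low.
    return model(im)

# e.g. run_once(some_model, torch.zeros(1, 3, 640, 640, device=device))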
import argparse
import os
import platform
import sys
import torch
from pathlib import Path
from ultralytics.utils.plotting import Annotator, colors, save_one_box
from models.common import DetectMultiBackend
from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams
from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr,
                           cv2, increment_path, non_max_suppression, print_args, scale_boxes, scale_segments,
                           strip_optimizer)
from utils.segment.general import masks2segments, process_mask, process_mask_native
from utils.torch_utils import select_device, smart_inference_mode
11,286
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
"""
Run YOLOv5 segmentation inference on images, videos, directories, streams, etc.

Usage - sources:
    $ python segment/predict.py --weights yolov5s-seg.pt --source 0                              # webcam
                                                                   img.jpg                        # image
                                                                   vid.mp4                        # video
                                                                   screen                         # screenshot
                                                                   path/                          # directory
                                                                   list.txt                       # list of images
                                                                   list.streams                   # list of streams
                                                                   'path/*.jpg'                   # glob
                                                                   'https://youtu.be/LNwODJXcvt4' # YouTube
                                                                   'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream

Usage - formats:
    $ python segment/predict.py --weights yolov5s-seg.pt                 # PyTorch
                                          yolov5s-seg.torchscript        # TorchScript
                                          yolov5s-seg.onnx               # ONNX Runtime or OpenCV DNN with --dnn
                                          yolov5s-seg_openvino_model     # OpenVINO
                                          yolov5s-seg.engine             # TensorRT
                                          yolov5s-seg.mlmodel            # CoreML (macOS-only)
                                          yolov5s-seg_saved_model        # TensorFlow SavedModel
                                          yolov5s-seg.pb                 # TensorFlow GraphDef
                                          yolov5s-seg.tflite             # TensorFlow Lite
                                          yolov5s-seg_edgetpu.tflite     # TensorFlow Edge TPU
                                          yolov5s-seg_paddle_model       # PaddlePaddle
"""

FILE = Path(__file__).resolve()
ROOT = FILE.parents[1]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
"""
Run YOLOv5 segmentation inference on images, videos, directories, streams, etc.

Usage - sources:
    $ python segment/predict.py --weights yolov5s-seg.pt --source 0                              # webcam
                                                                   img.jpg                        # image
                                                                   vid.mp4                        # video
                                                                   screen                         # screenshot
                                                                   path/                          # directory
                                                                   list.txt                       # list of images
                                                                   list.streams                   # list of streams
                                                                   'path/*.jpg'                   # glob
                                                                   'https://youtu.be/LNwODJXcvt4' # YouTube
                                                                   'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream

Usage - formats:
    $ python segment/predict.py --weights yolov5s-seg.pt                 # PyTorch
                                          yolov5s-seg.torchscript        # TorchScript
                                          yolov5s-seg.onnx               # ONNX Runtime or OpenCV DNN with --dnn
                                          yolov5s-seg_openvino_model     # OpenVINO
                                          yolov5s-seg.engine             # TensorRT
                                          yolov5s-seg.mlmodel            # CoreML (macOS-only)
                                          yolov5s-seg_saved_model        # TensorFlow SavedModel
                                          yolov5s-seg.pb                 # TensorFlow GraphDef
                                          yolov5s-seg.tflite             # TensorFlow Lite
                                          yolov5s-seg_edgetpu.tflite     # TensorFlow Edge TPU
                                          yolov5s-seg_paddle_model       # PaddlePaddle
"""

FILE = Path(__file__).resolve()
ROOT = FILE.parents[1]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative
@smart_inference_mode()
11
2023-11-12 13:28:26+00:00
16k
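Read together, the fields above appear to encode a single next-line-prediction example: the cropped source ends with the ROOT setup, and the recorded continuation is @smart_inference_mode(). A minimal, self-contained scoring sketch under that reading follows; the function name and the prediction value are illustrative, not taken from the dataset.

def exact_match(prediction: str, gold_next_line: str) -> bool:
    # Whitespace-normalised exact match; a common, though not the only, way to score next-line prediction.
    return prediction.strip() == gold_next_line.strip()

gold = "@smart_inference_mode()"        # the continuation recorded in the example above
prediction = "@smart_inference_mode()"  # hypothetical model output for the cropped prefix
print(exact_match(prediction, gold))    # True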
RAIVNLab/MatFormer-OLMo
olmo/train.py
[ { "identifier": "PathOrStr", "path": "olmo/aliases.py", "snippet": "" }, { "identifier": "CheckpointType", "path": "olmo/config.py", "snippet": "class CheckpointType(StrEnum):\n sharded = \"sharded\"\n unsharded = \"unsharded\"" }, { "identifier": "SpeedMonitorConfig", "path": "olmo/config.py", "snippet": "class SpeedMonitorConfig(BaseConfig):\n window_size: int = 100\n gpu_flops_available: Optional[Union[float, int]] = None" }, { "identifier": "TrainConfig", "path": "olmo/config.py", "snippet": "class TrainConfig(BaseConfig):\n \"\"\"\n OLMo training configuration.\n \"\"\"\n\n run_name: Optional[str] = None\n \"\"\"\n The name of the run.\n \"\"\"\n\n seed: int = 6198\n \"\"\"\n Used to seed all initial RNG states.\n \"\"\"\n\n dry_run: bool = False\n \"\"\"\n If ``True``, don't actually train.\n \"\"\"\n\n model: ModelConfig = field(default_factory=ModelConfig)\n \"\"\"\n OLMo Model configuration.\n \"\"\"\n\n optimizer: OptimizerConfig = field(default_factory=OptimizerConfig)\n \"\"\"\n Optimizer configuration.\n \"\"\"\n\n scheduler: SchedulerConfig = field(default_factory=SchedulerConfig)\n \"\"\"\n Learning rate scheduler configuration.\n \"\"\"\n\n restore_base_learning_rate: bool = True\n \"\"\"\n Set to ``False`` if you want to restart with the base learning rate from the config, not the checkpoint.\n \"\"\"\n\n data: DataConfig = field(default_factory=DataConfig)\n \"\"\"\n Training data configuration.\n \"\"\"\n\n restore_dataloader: bool = True\n \"\"\"\n When restarting, restore the data loader to where it left off.\n If you restarting in order to train on a different dataset, set this to ``False``.\n \"\"\"\n\n fast_forward_batches: Optional[int] = None\n \"\"\"\n When restarting, use this to fast-forward the dataloader beyond the last checkpoint.\n This can be useful when restarting due to a loss spike in order to skip the data that\n corresponded to the spike.\n \"\"\"\n\n evaluators: List[EvaluatorConfig] = field(default_factory=list)\n \"\"\"\n Evaluation configurations.\n \"\"\"\n\n eval_interval: int = 1000\n \"\"\"\n How often (in terms of batches) to run evaluations.\n \"\"\"\n\n tokenizer: TokenizerConfig = field(default_factory=TokenizerConfig)\n \"\"\"\n Tokenizer configuration.\n \"\"\"\n\n save_folder: str = \"./\"\n \"\"\"\n The directory to save checkpoints to.\n \"\"\"\n\n remote_save_folder: Optional[str] = None\n \"\"\"\n A folder in a cloud bucket to upload saved checkpoints to.\n \"\"\"\n\n save_interval: int = 1000\n \"\"\"\n How often (in terms of batches) to save training state checkpoints that can be used for restarts.\n \"\"\"\n\n save_interval_unsharded: Optional[int] = None\n \"\"\"\n How often (if at all) to save the unsharded state to a single file.\n For large models it can be costly to save these, so it usually makes sense to save\n these less often than regular (sharded) training checkpoints.\n \"\"\"\n\n matformer_factor: int = 1\n\n save_num_checkpoints_to_keep: int = -1\n \"\"\"\n How many checkpoints to keep.\n \"\"\"\n\n save_num_unsharded_checkpoints_to_keep: int = -1\n \"\"\"\n How many unsharded checkpoints to keep.\n \"\"\"\n\n save_overwrite: bool = False\n \"\"\"\n If ``True``, overwrite any conflicting checkpoint files.\n \"\"\"\n\n force_save_unsharded: bool = False\n \"\"\"\n Save an unsharded checkpoint before training (even during a dry run).\n Use this option with `--load-path={PATH}` and `--dry_run` to convert a sharded\n checkpoint into an unsharded checkpoint.\n \"\"\"\n\n load_path: Optional[str] = None\n \"\"\"\n The 
path to a (sharded) training checkpoint to restore/resume from.\n \"\"\"\n\n max_duration: int = 10000\n \"\"\"\n Maximum number of batches to train for.\n \"\"\"\n\n global_train_batch_size: int = 512\n \"\"\"\n The effective global batch size.\n \"\"\"\n\n device_train_batch_size: Optional[int] = None # calculated automatically\n \"\"\"\n Don't set this manually. This will be set to ``global_train_batch_size // world_size``.\n \"\"\"\n\n device_train_microbatch_size: int = 16\n \"\"\"\n The number of instances passed to the model in a single forward-backward pass. You should set\n this as large as you can based on available GPU memory.\n \"\"\"\n\n device_eval_batch_size: int = 16\n \"\"\"\n The number of evaluation instances passed to the model in a single forward pass on each device.\n \"\"\"\n\n eval_subset_num_batches: int = -1\n \"\"\"\n The number of batches to use for downstream evaluation from each dataset.\n \"\"\"\n\n eval_on_load: bool = False\n \"\"\"\n When resuming from a checkpoint, run the evaluation loop right away.\n \"\"\"\n\n device_train_grad_accum: Optional[int] = None # calculated automatically\n \"\"\"\n Don't set this manually. This will be set to ``device_train_batch_size // device_train_microbatch_size``.\n \"\"\"\n\n max_grad_norm: Optional[float] = None\n \"\"\"\n Clip gradients to this value if set.\n \"\"\"\n\n precision: Optional[str] = None\n \"\"\"\n Precision to train with (e.g. \"amp_bf16\", \"amp_fp16\", or \"fp32\").\n \"\"\"\n\n wandb: Optional[WandbConfig] = None\n \"\"\"\n Weights & Biases configuration.\n \"\"\"\n\n speed_monitor: SpeedMonitorConfig = field(default_factory=SpeedMonitorConfig)\n \"\"\"\n Speed monitor configuration.\n \"\"\"\n\n console_log_interval: int = 1\n \"\"\"\n How often to log to the console.\n \"\"\"\n\n compile: Optional[CompilerConfig] = None\n \"\"\"\n Settings for compiling the model with ``torch.compile()``.\n \"\"\"\n\n activation_checkpointing: bool = False\n \"\"\"\n Use activation checkpointing on transformer blocks.\n \"\"\"\n\n fsdp: FSDPConfig = field(default_factory=FSDPConfig)\n \"\"\"\n Fully sharded data parallel settings.\n \"\"\"\n\n softmax_auxiliary_loss: bool = False\n \"\"\"\n If ``True``, we add the auxiliary loss function from PaLM that encourages the softmax\n normalizing term to be close to 0.\n \"\"\"\n\n time_limit: Optional[float] = 60 * 60 * 119.5\n \"\"\"\n The maximum amount of time to train for before saving a checkpoint and ending early.\n On LUMI we have 48 hours max per job, so we default to just under 48 hours to give us time\n to write out a final checkpoint.\n \"\"\"\n\n early_stopping_factor: Optional[float] = None\n\n save_data_indices: bool = True\n \"\"\"\n Save training data indices from each batch for each worker.\n \"\"\"\n\n @property\n def autocast_precision(self) -> torch.dtype:\n if self.precision == \"amp_bf16\":\n return torch.bfloat16\n elif self.precision == \"amp_fp16\":\n return torch.float16\n elif self.precision == \"fp32\":\n return torch.float32\n else:\n raise ValueError(f\"Unexpected precision type '{self.precision}'\")" }, { "identifier": "IterableDataset", "path": "olmo/data/iterable_dataset.py", "snippet": "class IterableDataset(torch.utils.data.IterableDataset[Dict[str, Any]]):\n \"\"\"\n Adapted from PyTorch's DistributedSampler, this wraps a Dataset or arbitrary sequence\n as an IterableDataset that can be deterministically restarted at any point by setting `start_index`,\n which should be a multiple of your global batch size.\n Similarly `max_examples`, 
if set, should be a multiple of global batch size.\n \"\"\"\n\n def __init__(\n self,\n dataset: Union[Sequence[List[int]], Sequence[torch.Tensor], Sequence[Dict[str, Any]]],\n *,\n seed: int = 0,\n start_index: int = 0,\n max_examples: Optional[int] = None,\n shuffle: bool = True,\n drop_last: bool = False,\n world_size: Optional[int] = None,\n rank: Optional[int] = None,\n work_dir: Optional[PathOrStr] = None,\n ):\n self.dataset = dataset\n self.seed = seed\n self.start_index = start_index\n self.max_examples = max_examples\n self.shuffle = shuffle\n self.drop_last = drop_last\n self.rank = rank if rank is not None else get_global_rank()\n self.world_size = world_size if world_size is not None else get_world_size()\n # If the dataset length is evenly divisible by # of replicas, then there\n # is no need to drop any data, since the dataset will be split equally.\n if self.drop_last and len(self.dataset) % self.world_size != 0: # type: ignore[arg-type]\n # Split to nearest available length that is evenly divisible by world size.\n # This is to ensure each rank receives the same amount of data.\n num_samples = math.ceil(\n (len(self.dataset) - self.world_size) / self.world_size # type: ignore[arg-type]\n )\n else:\n num_samples = math.ceil(len(self.dataset) / self.world_size) # type: ignore[arg-type]\n self.total_size = num_samples * self.world_size\n self.global_indices_file: Optional[Path] = None\n if work_dir is not None:\n self.global_indices_file = Path(work_dir) / \"global_indices.npy\"\n if self.rank == 0:\n log.info(\"Saving global data order indices...\")\n self.global_indices_file.parent.mkdir(parents=True, exist_ok=True)\n global_indices = self._build_global_indices()\n global_indices_mmap = np.memmap(\n self.global_indices_file, dtype=np.uint64, mode=\"w+\", shape=(len(global_indices),)\n )\n global_indices_mmap[:] = global_indices\n global_indices_mmap.flush()\n del global_indices_mmap\n log.info(\"Global data order indices saved to '%s'\", self.global_indices_file)\n barrier()\n\n def _build_global_indices(self) -> List[int]:\n if self.shuffle:\n # Deterministically shuffle based on epoch and seed\n # Torch built-in randomness is not very random, so we use numpy.\n rng = np.random.Generator(np.random.PCG64(seed=self.seed))\n indices = np.arange(len(self.dataset))\n rng.shuffle(indices)\n indices = list(indices)\n else:\n indices = list(range(len(self.dataset))) # type: ignore[arg-type]\n\n if not self.drop_last:\n # Add extra samples to make it evenly divisible\n padding_size = self.total_size - len(indices)\n if padding_size <= len(indices):\n indices += indices[:padding_size]\n else:\n indices += (indices * math.ceil(padding_size / len(indices)))[:padding_size]\n else:\n # Remove tail of data to make it evenly divisible.\n indices = indices[: self.total_size]\n assert len(indices) == self.total_size\n return indices\n\n def get_global_indices(self) -> Sequence[int]:\n if self.global_indices_file is not None:\n return np.memmap(self.global_indices_file, mode=\"r\", dtype=np.uint64) # type: ignore\n else:\n return self._build_global_indices()\n\n def __iter__(self) -> Iterator[Dict[str, Any]]:\n indices = self.get_global_indices()\n\n # Truncate to max_examples.\n if self.max_examples is not None:\n assert self.max_examples % self.world_size == 0\n indices = indices[: self.max_examples]\n\n # Start at the specified index.\n if self.start_index > 0:\n assert self.start_index % self.world_size == 0\n indices = indices[self.start_index :]\n\n # Slice indices by rank to avoid 
duplicates.\n indices = indices[self.rank : self.total_size : self.world_size]\n\n # Lastly, slice the indices by data loader worker rank to avoid duplicates.\n worker_info = torch.utils.data.get_worker_info()\n if worker_info is not None:\n indices = indices[worker_info.id :: worker_info.num_workers]\n\n # Convert to a list at this point so we don't have to rely on memory-mapping.\n if isinstance(indices, np.memmap):\n indices_list = indices.tolist() # type: ignore\n else:\n indices_list = indices\n del indices\n\n return (self._get_dataset_item(int(idx)) for idx in indices_list)\n\n def _get_dataset_item(self, idx: int) -> Dict[str, Any]:\n item = self.dataset[idx]\n if isinstance(item, dict):\n return dict(**item, index=idx)\n else:\n return {\"input_ids\": item, \"index\": idx}" }, { "identifier": "Evaluator", "path": "olmo/eval/evaluator.py", "snippet": "class Evaluator:\n label: str\n type: EvaluatorType\n eval_loader: DataLoader\n eval_metric: Union[Metric, Dict[str, Metric]]\n subset_num_batches: Optional[int] = None\n\n def reset_metrics(self) -> None:\n if isinstance(self.eval_metric, Metric):\n self.eval_metric.reset()\n else:\n for metric in self.eval_metric.values():\n metric.reset()\n\n def compute_metrics(self) -> Dict[str, float]:\n if self.type == EvaluatorType.downstream:\n assert isinstance(self.eval_metric, ICLMetric)\n return {\n f\"eval/downstream/{self.label}_{self.eval_metric.metric_type}\": self.eval_metric.compute().item(),\n }\n elif self.type == EvaluatorType.lm:\n # Metric(s) = cross entropy loss\n metrics: Dict[str, Metric]\n if isinstance(self.eval_metric, Metric):\n metrics = {self.label: self.eval_metric}\n else:\n metrics = self.eval_metric\n out = {}\n for label in sorted(metrics.keys()):\n metric = metrics[label]\n assert isinstance(metric, MeanMetric)\n if metric.weight.item() == 0.0: # type: ignore\n # In this case we probably haven't called '.update()' on this metric yet,\n # so we do so here with dummy values. 
Since we pass 0.0 in for weight this won't\n # affect the final value.\n # This can happen when the evaluator contains multiple tasks/datasets and we didn't\n # get to this one within the current evaluation loop.\n metric.update(0.0, 0.0)\n loss = metric.compute()\n if loss.isnan().item():\n # This can happen when the evaluator contains multiple tasks/datasets and we didn't\n # get to this one within the current evaluation loop.\n continue\n else:\n out[f\"eval/{label}/CrossEntropyLoss\"] = loss.item()\n out[f\"eval/{label}/Perplexity\"] = (2**(loss)).item()\n return out\n else:\n raise ValueError(f\"Unexpected evaluator type '{self.type}'\")\n\n def update_metrics(\n self,\n batch: Dict[str, Any],\n ce_loss: torch.Tensor,\n logits: torch.Tensor,\n matformer_factor = 1\n ) -> None:\n if self.type == EvaluatorType.downstream:\n assert isinstance(self.eval_metric, ICLMetric)\n self.eval_metric.update(batch, logits) # type: ignore\n elif self.type == EvaluatorType.lm:\n # Metric(s) = cross entropy loss\n for metadata, instance_loss in zip(batch[\"metadata\"], ce_loss):\n if isinstance(self.eval_metric, dict):\n metric = self.eval_metric[metadata[\"label\"]]\n else:\n metric = self.eval_metric\n metric.update(instance_loss)\n else:\n raise ValueError(f\"Unexpected evaluator type '{self.type}'\")" }, { "identifier": "OlmoConfigurationError", "path": "olmo/exceptions.py", "snippet": "class OlmoConfigurationError(OlmoError):\n \"\"\"\n An error with a configuration file.\n \"\"\"" }, { "identifier": "Olmo", "path": "olmo/model.py", "snippet": "class Olmo(nn.Module):\n def __init__(self, config: ModelConfig, init_params: bool = True):\n super().__init__()\n self.config = config\n\n # Validate config.\n if self.config.alibi and self.config.flash_attention:\n raise OlmoConfigurationError(\"ALiBi is currently not supported with FlashAttention\")\n\n if self.config.alibi and self.config.rope:\n raise OlmoConfigurationError(\"ALiBi and RoPE are mutually exclusive\")\n\n if self.config.embedding_size is not None and self.config.embedding_size != self.config.vocab_size:\n if self.config.embedding_size < self.config.vocab_size:\n raise OlmoConfigurationError(\"embedding size should be at least as big as vocab size\")\n elif self.config.embedding_size % 128 != 0:\n import warnings\n\n warnings.warn(\n \"Embedding size is not a multiple of 128! 
This could hurt throughput performance.\", UserWarning\n )\n\n torch.backends.cuda.enable_flash_sdp(self.config.flash_attention)\n torch.backends.cuda.enable_mem_efficient_sdp(False) # this is super slow so make sure torch won't use it\n\n self.transformer = nn.ModuleDict(\n dict(\n wte=nn.Embedding(\n config.embedding_size or config.vocab_size, config.d_model, device=config.init_device\n ),\n emb_drop=nn.Dropout(config.embedding_dropout),\n blocks=nn.ModuleList([OlmoBlock.build(config) for _ in range(config.n_layers)]),\n ln_f=LayerNorm.build(config),\n )\n )\n if not (self.config.alibi or self.config.rope):\n self.transformer.update(\n {\"wpe\": nn.Embedding(config.max_sequence_length, config.d_model, device=config.init_device)}\n )\n if init_params and self.config.init_device != \"meta\":\n self.apply(self.param_init_fn)\n self.__num_fwd_flops: Optional[int] = None\n\n # Attention bias cache.\n # We could cache these as buffers, but we've run into various issues doing that with FSDP.\n # In general it appears the way FSDP handles buffers is not well-defined.\n # It doesn't shard them but apparently it does synchronize them across processes, which we want to avoid\n # since (A) it isn't necessary, and (B) we have `-inf` in these biases which might get turned into\n # NaNs when they're synchronized due to casting or some other issue.\n self.__bias_cache: Dict[str, Optional[torch.FloatTensor]] = {\n \"causal_attention_bias\": None,\n \"alibi_attention_bias\": None,\n }\n if self.config.alibi:\n # Warm up cache.\n self.causal_attention_bias\n self.alibi_attention_bias\n\n @property\n def device(self) -> torch.device:\n device: torch.device = self.transformer.wte.weight.device # type: ignore\n if device.type == \"meta\":\n if self.config.init_device is not None and self.config.init_device != \"meta\":\n return torch.device(self.config.init_device)\n else:\n return torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n else:\n return device\n\n @property\n def causal_attention_bias(self) -> torch.FloatTensor:\n causal_bias = self.__bias_cache[\"causal_attention_bias\"]\n if causal_bias is None:\n causal_bias = causal_attention_bias(self.config, self.device)\n self.__bias_cache[\"causal_attention_bias\"] = causal_bias\n elif causal_bias.device != self.device: # in case model was moved to different device\n causal_bias = causal_bias.to(device=self.device)\n self.__bias_cache[\"causal_attention_bias\"] = causal_bias # type: ignore\n return causal_bias # type: ignore\n\n @property\n def alibi_attention_bias(self) -> torch.FloatTensor:\n alibi_bias = self.__bias_cache[\"alibi_attention_bias\"]\n if alibi_bias is None:\n alibi_bias = alibi_attention_bias(self.config, self.device)\n self.__bias_cache[\"alibi_attention_bias\"] = alibi_bias\n elif alibi_bias.device != self.device: # in case model was moved to different device\n alibi_bias = alibi_bias.to(device=self.device)\n self.__bias_cache[\"alibi_attention_bias\"] = alibi_bias # type: ignore\n return alibi_bias # type: ignore\n\n def forward(\n self,\n input_ids: torch.LongTensor,\n attention_mask: Optional[torch.Tensor] = None,\n attention_bias: Optional[torch.Tensor] = None,\n past_key_values: Optional[Sequence[Tuple[torch.Tensor, torch.Tensor]]] = None,\n use_cache: bool = False,\n last_logits_only: bool = False,\n ) -> OlmoOutput:\n \"\"\"\n :param input_ids: A tensor of shape `(batch_size, seq_len)`.\n :param attention_mask: A tensor of shape `(batch_size, seq_len)` that indicates\n which input IDs are masked. 
A `1` value in the mask means that\n the corresponding input ID should *not* be ignored. A `0` means\n that the corresponding input ID is masked.\n\n This has the same meaning as the `attention_mask` in HuggingFace's `transformers`\n library.\n :param attention_bias: A tensor of shape `(batch_size, 1, seq_len, seq_len)`,\n `(1, 1, seq_len, seq_len)`, or `(seq_len, seq_len)`. This is used\n to introduce causal or other biases.\n\n If the tensor is a bool or byte tensor, a `True` or `1` at `attention_bias[:, :, i, j]`\n indicates that the i-th element in the sequence is allowed to attend to the j-th\n element in the sequence.\n\n If the tensor is a float tensor, it will just be added to the attention\n scores before the softmax.\n\n The default is causal, which corresponds to a lower-diagonal byte matrix of ones.\n :param past_key_values: Pre-computed keys and values for each attention block.\n Can be used to speed up sequential decoding. The `input_ids` which have\n their past given to this model should not be passed as `input_ids` as they have already been computed.\n :param use_cache: If `True`, return key and value tensors for each block.\n :param last_logits_only: If `True`, only compute the logits for the last token of each sequence.\n This can speed up decoding when you only care about the next token.\n \"\"\"\n if past_key_values:\n assert len(past_key_values) == self.config.n_layers\n\n batch_size, seq_len = input_ids.size()\n assert seq_len <= self.config.max_sequence_length, (\n f\"Cannot forward input with seq_len={seq_len}, \"\n f\"this model only supports seq_len<={self.config.max_sequence_length}\"\n )\n\n # Get embeddings of input.\n # shape: (batch_size, seq_len, d_model)\n x = self.transformer.wte(input_ids) # type: ignore\n\n if not (self.config.alibi or self.config.rope):\n # Get positional embeddings.\n if past_key_values is None:\n past_length = 0\n else:\n past_length = past_key_values[0][0].size(-2)\n # shape: (1, seq_len)\n pos = torch.arange(\n past_length, past_length + seq_len, dtype=torch.long, device=input_ids.device\n ).unsqueeze(0)\n # shape: (1, seq_len, d_model)\n pos_emb = self.transformer.wpe(pos) # type: ignore\n x = pos_emb + x\n\n # Add input + positional embeddings and apply dropout.\n # shape: (batch_size, seq_len, d_model)\n x = self.transformer.emb_drop(x) # type: ignore\n\n # Transform the attention mask into what the blocks expect.\n if attention_mask is not None:\n # shape: (batch_size, 1, 1, seq_len)\n attention_mask = attention_mask.to(dtype=x.dtype).view(batch_size, -1)[:, None, None, :]\n attention_mask = (1.0 - attention_mask) * torch.finfo(attention_mask.dtype).min\n attention_mask.masked_fill_(attention_mask == 1.0, float(\"-inf\"))\n\n # Merge attention mask with attention bias.\n if (\n attention_bias is not None\n or attention_mask is not None\n or self.config.alibi\n # NOTE (epwalsh): we need to initialize the attn bias in order for attn to work properly\n # with key+value cache. 
Otherwise `F.scaled_dot_product_attention()` doesn't seem to compute\n # scores correctly.\n or past_key_values is not None\n ):\n if attention_bias is None and self.config.alibi:\n attention_bias = self.causal_attention_bias + self.alibi_attention_bias\n elif attention_bias is None:\n attention_bias = self.causal_attention_bias\n elif attention_bias.dtype in (torch.int8, torch.bool):\n attention_bias = attention_bias.to(dtype=x.dtype)\n attention_bias.masked_fill_(attention_bias == 0.0, float(\"-inf\"))\n\n # Transform to the right shape and data type.\n mask_len = seq_len\n if attention_mask is not None:\n mask_len = attention_mask.shape[-1]\n elif past_key_values is not None:\n mask_len = past_key_values[0][0].shape[-2] + input_ids.shape[-1]\n attention_bias = attention_bias[:, :, :mask_len, :mask_len].to(x.dtype)\n\n # Add in the masking bias.\n if attention_mask is not None:\n attention_bias = attention_bias + attention_mask\n\n attn_key_values: Optional[List[Tuple[torch.Tensor, torch.Tensor]]] = [] if use_cache else None\n\n # Apply blocks one-by-one.\n for block, layer_past in zip(\n self.transformer.blocks, # type: ignore\n past_key_values or [None] * self.config.n_layers, # type: ignore\n ):\n # shape: (batch_size, seq_len, d_model)\n x, cache = block(x, attention_bias=attention_bias, layer_past=layer_past, use_cache=use_cache)\n if attn_key_values is not None:\n assert cache is not None\n attn_key_values.append(cache)\n\n if last_logits_only:\n # shape: (batch_size, 1, d_model)\n x = x[:, -1, :].unsqueeze(1)\n\n # Apply final layer norm.\n # shape: (batch_size, seq_len or 1, d_model)\n x = self.transformer.ln_f(x) # type: ignore\n\n # Get logits.\n # shape: (batch_size, seq_len or 1, vocab_size)\n logits = F.linear(x, self.transformer.wte.weight, None) # type: ignore\n\n return OlmoOutput(logits=logits, attn_key_values=attn_key_values) # type: ignore[arg-type]\n\n def fsdp_wrap_fn(self, module, recurse: bool = True, nonwrapped_numel: int = 0):\n del recurse, nonwrapped_numel\n return isinstance(module, OlmoBlock)\n\n def activation_checkpointing_fn(self, module):\n return isinstance(module, OlmoBlock)\n\n def reset_parameters(self):\n self.apply(self.param_init_fn)\n\n def param_init_fn(self, module):\n from functools import partial\n\n init_fn = partial(nn.init.normal_, mean=0.0, std=self.config.init_std)\n\n def fused_init_fn(module):\n # Parameter initialization is often based on the parameters shape.\n # If a layer is fused, initialization should be based on the shapes\n # of the original tensor instead of the shape of the fused tensor.\n # Layers which are fused should have the _fused attribute defined.\n # The first element of _fused is the dimension along which the tensor is fused.\n # This is followed by an iterable of split indices.\n _fused = getattr(module, \"_fused\", None)\n if _fused is None:\n raise RuntimeError(\"Internal logic error\")\n\n dim, splits = _fused\n splits = (0, *splits, module.weight.size(dim))\n for s, e in zip(splits[:-1], splits[1:]):\n slice_indices = [slice(None)] * module.weight.ndim\n slice_indices[dim] = slice(s, e)\n init_fn(module.weight[slice_indices])\n\n # Linear\n if isinstance(module, nn.Linear):\n if hasattr(module, \"_fused\"):\n fused_init_fn(module)\n else:\n init_fn(module.weight)\n\n if module.bias is not None:\n nn.init.zeros_(module.bias)\n\n if getattr(module, \"_is_residual\", False):\n with torch.no_grad():\n module.weight.div_(math.sqrt(2 * self.config.n_layers))\n\n if module.bias is not None:\n 
nn.init.zeros_(module.bias)\n\n # Embedding\n if isinstance(module, nn.Embedding):\n init_fn(module.weight)\n\n # LayerNorm\n if isinstance(module, (nn.LayerNorm, LayerNorm, RMSLayerNorm)):\n torch.nn.init.ones_(module.weight)\n torch.nn.init.zeros_(module.bias)\n\n def num_params(self, include_embedding: bool = True) -> int:\n \"\"\"\n Get the total number of parameters.\n \"\"\"\n params = (np for np in self.named_parameters())\n if not include_embedding:\n params = filter( # type: ignore\n lambda np: \".wte.\" not in np[0] and \".wpe.\" not in np[0],\n params,\n )\n return sum(p.numel() for _, p in params)\n\n @property\n def num_fwd_flops(self):\n if self.__num_fwd_flops:\n return self.__num_fwd_flops\n n_params = self.num_params()\n # the number of parameters is approximately the number of multiply-accumulates (MAC) in the network\n # each MAC has 2 FLOPs - we multiply by 2 ie 2 * n_param\n # this gets us FLOPs / token\n params_flops_per_token = 2 * n_params\n params_flops_per_seq = params_flops_per_token * self.config.max_sequence_length\n # there are 2 FLOPS per mac; there is A=Q*K^T and out=A*V ops (ie mult by 2)\n attn_flops_per_seq = (\n self.config.n_layers * 2 * 2 * (self.config.d_model * (self.config.max_sequence_length**2))\n )\n self.__num_fwd_flops = params_flops_per_seq + attn_flops_per_seq\n return self.__num_fwd_flops\n\n def generate(\n self,\n input_ids: torch.LongTensor,\n attention_mask: Optional[torch.Tensor] = None,\n attention_bias: Optional[torch.Tensor] = None,\n max_steps: int = 10,\n beam_size: int = 1,\n per_node_beam_size: Optional[int] = None,\n sampler: Optional[Sampler] = None,\n min_steps: Optional[int] = None,\n final_sequence_scorer: Optional[FinalSequenceScorer] = None,\n constraints: Optional[List[Constraint]] = None,\n ) -> OlmoGenerateOutput:\n \"\"\"\n Generate token IDs using beam search.\n\n Note that by default ``beam_size`` is set to 1, which is greedy decoding.\n\n :param input_ids: A tensor of shape `(batch_size, seq_len)`.\n :param attention_mask: A optional tensor of shape `(batch_size, seq_len)`, the same\n as for the forward method.\n :param attention_bias: A tensor of shape\n `(batch_size, 1, seq_len + tokens_to_generate, seq_len + tokens_to_generate)`,\n the same as for the forward method except only one shape is excepted here.\n\n For an explanation of the other arguments, see the :class:`BeamSearch` class.\n \"\"\"\n beam_search = BeamSearch(\n self.config.eos_token_id,\n max_steps=max_steps,\n beam_size=beam_size,\n per_node_beam_size=per_node_beam_size,\n sampler=sampler,\n min_steps=min_steps,\n final_sequence_scorer=final_sequence_scorer,\n constraints=constraints,\n )\n\n # Validate inputs.\n batch_size, seq_len = input_ids.shape\n if attention_mask is not None:\n assert attention_mask.shape == (batch_size, seq_len)\n if attention_bias is not None:\n assert len(attention_bias.shape) == 4\n assert attention_bias.shape[:2] == (batch_size, 1)\n assert (\n seq_len + beam_search.max_steps\n <= attention_bias.shape[2]\n == attention_bias.shape[3]\n <= self.config.max_sequence_length\n )\n\n tokens_generated = 0\n\n def flatten_past_key_values(\n past_key_values: List[Tuple[torch.Tensor, torch.Tensor]]\n ) -> Dict[str, torch.Tensor]:\n out = {}\n for i, (key, value) in enumerate(past_key_values):\n out[f\"past_key_{i}\"] = key\n out[f\"past_value_{i}\"] = value\n return out\n\n def unflatten_past_key_values(\n past_key_values: Dict[str, torch.Tensor]\n ) -> List[Tuple[torch.Tensor, torch.Tensor]]:\n out = []\n for i in 
range(self.config.n_layers):\n past_key = past_key_values[f\"past_key_{i}\"]\n past_value = past_key_values[f\"past_value_{i}\"]\n out.append((past_key, past_value))\n return out\n\n def step(\n last_predictions: torch.Tensor, state: dict[str, torch.Tensor]\n ) -> tuple[torch.Tensor, dict[str, torch.Tensor]]:\n nonlocal tokens_generated\n\n attention_mask = state.get(\"attention_mask\")\n attention_bias = state.get(\"attention_bias\")\n\n if tokens_generated > 0:\n past_key_values = unflatten_past_key_values(state)\n input_ids = last_predictions.unsqueeze(1)\n if attention_mask is not None:\n group_size = input_ids.shape[0]\n attention_mask = torch.cat((attention_mask, attention_mask.new_ones((group_size, 1))), dim=-1)\n else:\n past_key_values = None\n input_ids = state[\"input_ids\"]\n\n tokens_generated += 1\n\n # Run forward pass of model to get logits, then normalize to get log probs.\n output = self(\n input_ids,\n attention_mask=attention_mask,\n attention_bias=attention_bias,\n past_key_values=past_key_values,\n use_cache=True,\n last_logits_only=True,\n )\n log_probs = F.log_softmax(output.logits[:, -1, :], dim=-1)\n\n # Create new state.\n state = flatten_past_key_values(output.attn_key_values)\n if attention_mask is not None:\n state[\"attention_mask\"] = attention_mask\n if attention_bias is not None:\n state[\"attention_bias\"] = attention_bias\n\n return log_probs, state\n\n initial_preds = input_ids.new_zeros((batch_size,)) # This is arbitrary, we won't use this.\n state: dict[str, torch.Tensor] = {\"input_ids\": input_ids}\n if attention_mask is not None:\n state[\"attention_mask\"] = attention_mask\n if attention_bias is not None:\n state[\"attention_bias\"] = attention_bias\n with torch.no_grad():\n token_ids, scores = beam_search.search(initial_preds, state, step)\n\n return OlmoGenerateOutput(\n token_ids=token_ids, # type: ignore[arg-type]\n scores=scores, # type: ignore[arg-type]\n )\n\n @classmethod\n def from_checkpoint(cls, checkpoint_dir: PathOrStr, device: str = \"cpu\") -> Olmo:\n \"\"\"\n Load an OLMo model from a checkpoint.\n \"\"\"\n from cached_path import cached_path\n\n # Load config.\n config_path = cached_path(os.path.join(checkpoint_dir, \"config.yaml\"))\n model_config = ModelConfig.load(config_path, key=\"model\", validate_paths=False)\n\n # Initialize model (always on CPU to start with so we don't run out of GPU memory).\n model_config.init_device = \"cpu\"\n model = Olmo(model_config)\n model.config.init_device = device\n\n # Load state dict directly to target device.\n state_dict_path = cached_path(os.path.join(checkpoint_dir, \"model.pt\"))\n state_dict = torch.load(state_dict_path, map_location=\"cpu\")\n model.load_state_dict(model._make_state_dict_compatible(state_dict))\n\n return model.to(torch.device(device)).eval()\n\n def _make_state_dict_compatible(self, state_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:\n # For backwards compatibility prior to fixing https://github.com/allenai/LLM/issues/222\n if self.config.block_type == BlockType.sequential:\n for block_idx in range(self.config.n_layers):\n norm_w_key = f\"transformer.blocks.{block_idx}.norm.weight\"\n norm_b_key = f\"transformer.blocks.{block_idx}.norm.bias\"\n if norm_w_key in state_dict:\n norm_w = state_dict.pop(norm_w_key)\n state_dict[f\"transformer.blocks.{block_idx}.attn_norm.weight\"] = norm_w\n state_dict[f\"transformer.blocks.{block_idx}.ff_norm.weight\"] = norm_w.clone()\n if norm_b_key in state_dict:\n norm_b = state_dict.pop(norm_b_key)\n 
state_dict[f\"transformer.blocks.{block_idx}.attn_norm.bias\"] = norm_b\n state_dict[f\"transformer.blocks.{block_idx}.ff_norm.bias\"] = norm_b.clone()\n return state_dict" }, { "identifier": "MatformerManager", "path": "olmo/model.py", "snippet": "class MatformerManager:\n _instance = None\n\n def __init__(self):\n raise RuntimeError(\"Call get_instance() instead\")\n\n def initialize(self):\n self.current_factor = 1\n\n @classmethod\n def get_instance(cls):\n if cls._instance is None:\n cls._instance = cls.__new__(cls)\n cls._instance.initialize()\n return cls._instance" }, { "identifier": "set_new_base_lr", "path": "olmo/optim.py", "snippet": "def set_new_base_lr(\n optim: torch.optim.Optimizer, scheduler: torch.optim.lr_scheduler.LRScheduler, new_base_lr: float\n):\n \"\"\"\n Set a new base learning rate in the optimizer and scheduler.\n \"\"\"\n # Hack scheduler state to start with the new base LR.\n if isinstance(scheduler, torch.optim.lr_scheduler.SequentialLR):\n # Update 'base_lr' for all sub-schedulers.\n for sched in scheduler._schedulers: # type: ignore\n sched.base_lrs = [new_base_lr] * len(sched.base_lrs)\n\n # Update '_last_lr' for current sub-scheduler.\n current_sched = scheduler._schedulers[bisect_right(scheduler._milestones, scheduler.last_epoch)] # type: ignore\n if hasattr(current_sched, \"_get_closed_form_lr\"):\n current_sched._last_lr = current_sched._get_closed_form_lr()\n elif isinstance(current_sched, torch.optim.lr_scheduler.LambdaLR):\n current_sched._last_lr = current_sched.get_lr() # type: ignore\n else:\n raise NotImplementedError\n scheduler._last_lr = current_sched.get_last_lr() # type: ignore\n else:\n raise NotImplementedError\n\n # Update LR in optimizer.\n for param_group, new_lr in zip(optim.param_groups, scheduler.get_last_lr()):\n param_group[\"lr\"] = new_lr\n param_group[\"initial_lr\"] = new_base_lr" }, { "identifier": "barrier", "path": "olmo/util.py", "snippet": "def barrier() -> None:\n if dist.is_available() and dist.is_initialized():\n dist.barrier()" }, { "identifier": "get_global_rank", "path": "olmo/util.py", "snippet": "def get_global_rank() -> int:\n return int(os.environ.get(\"RANK\") or dist.get_rank())" }, { "identifier": "get_world_size", "path": "olmo/util.py", "snippet": "def get_world_size() -> int:\n if dist.is_available() and dist.is_initialized():\n return dist.get_world_size()\n else:\n return 1" }, { "identifier": "move_to_device", "path": "olmo/util.py", "snippet": "def move_to_device(o: T, device: torch.device) -> T:\n if isinstance(o, torch.Tensor):\n return o.to(device) # type: ignore[return-value]\n elif isinstance(o, dict):\n return {k: move_to_device(v, device) for k, v in o.items()} # type: ignore[return-value]\n elif isinstance(o, list):\n return [move_to_device(x, device) for x in o] # type: ignore[return-value]\n elif isinstance(o, tuple):\n return tuple((move_to_device(x, device) for x in o)) # type: ignore[return-value]\n else:\n return o" }, { "identifier": "peak_gpu_memory", "path": "olmo/util.py", "snippet": "def peak_gpu_memory(reset: bool = False) -> Optional[float]:\n \"\"\"\n Get the peak GPU memory usage in MB across all ranks.\n Only rank 0 will get the final result.\n \"\"\"\n if not torch.cuda.is_available():\n return None\n\n device = torch.device(\"cuda\")\n peak_mb = torch.cuda.max_memory_allocated(device) / 1000000\n if dist.is_available() and dist.is_initialized():\n peak_mb_tensor = torch.tensor(peak_mb, device=device)\n dist.reduce(peak_mb_tensor, 0, dist.ReduceOp.MAX)\n peak_mb = 
peak_mb_tensor.item()\n\n if reset:\n # Reset peak stats.\n torch.cuda.reset_max_memory_allocated(device)\n\n return peak_mb" }, { "identifier": "resource_path", "path": "olmo/util.py", "snippet": "def resource_path(folder: PathOrStr, fname: str) -> PathOrStr:\n if is_url(folder):\n from cached_path import cached_path\n\n return cached_path(f\"{folder}/{fname}\")\n else:\n return Path(folder) / fname" }, { "identifier": "syncronize_flag", "path": "olmo/util.py", "snippet": "def syncronize_flag(flag: bool, device: torch.device) -> bool:\n if dist.is_available() and dist.is_initialized():\n flag_tensor = torch.tensor(flag, device=device)\n dist.broadcast(flag_tensor, 0)\n return flag_tensor.item() # type: ignore\n else:\n return flag" }, { "identifier": "upload", "path": "olmo/util.py", "snippet": "def upload(source: PathOrStr, target: str, save_overwrite: bool = False):\n \"\"\"Upload source file to a target location on GCS or S3.\"\"\"\n from urllib.parse import urlparse\n\n source = Path(source)\n assert source.is_file()\n parsed = urlparse(target)\n if parsed.scheme == \"gs\":\n _gcs_upload(source, parsed.netloc, parsed.path, save_overwrite=save_overwrite)\n elif parsed.scheme == \"s3\":\n _s3_upload(source, parsed.netloc, parsed.path, save_overwrite=save_overwrite)\n else:\n raise NotImplementedError(f\"Upload not implemented for '{parsed.scheme}' scheme\")" }, { "identifier": "wait_on", "path": "olmo/util.py", "snippet": "def wait_on(condition: Callable[[], bool], description: str, timeout: float = 10.0):\n \"\"\"Wait on the condition function to return True.\"\"\"\n start_time = time.monotonic()\n while not condition():\n time.sleep(0.5)\n if time.monotonic() - start_time > timeout:\n raise TimeoutError(f\"{description} timed out\")" } ]
import logging
import math
import random
import shutil
import time
import numpy as np
import torch
import torch.nn.functional as F
import wandb
from collections import deque
from dataclasses import dataclass, field
from itertools import islice
from pathlib import Path
from typing import Any, Deque, Dict, List, Optional, TextIO, Tuple
from packaging import version
from torch.distributed.fsdp import FullStateDictConfig
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp import StateDictType
from torch.distributed.fsdp.api import (
    FullOptimStateDictConfig,
    ShardedOptimStateDictConfig,
    ShardedStateDictConfig,
)
from torch.utils.data import DataLoader
from torchmetrics import MeanMetric
from .aliases import PathOrStr
from .config import CheckpointType, SpeedMonitorConfig, TrainConfig
from .data import IterableDataset
from .eval import Evaluator
from .exceptions import OlmoConfigurationError
from .model import Olmo, MatformerManager
from .optim import set_new_base_lr
from .util import (
    barrier,
    get_global_rank,
    get_world_size,
    move_to_device,
    peak_gpu_memory,
    resource_path,
    syncronize_flag,
    upload,
    wait_on,
)
13,307
else: return False def eval(self) -> Dict[str, Any]: # Zero gradients and set model to 'eval' mode. self.optim.zero_grad(set_to_none=True) self.fsdp_model.eval() eval_metrics = {} for evaluator in self.evaluators: log.info(f"Running evaluation for '{evaluator.label}'...") # Run model over batches. if self.cfg.matformer_factor > 1: self.matmng = MatformerManager.get_instance() self.matmng.current_factor = 1 assert self.cfg.matformer_factor % 2 == 0 iters = int(math.log2(self.cfg.matformer_factor)) + 1 for i in range(iters): # Reset metrics. evaluator.reset_metrics() # Initialize data loader iterator. eval_batches = iter(evaluator.eval_loader) # Adjust how many batches to evaluate on. num_eval_batches = ( evaluator.subset_num_batches if evaluator.subset_num_batches is not None else self.cfg.eval_subset_num_batches ) if num_eval_batches > 0: num_eval_batches = min(num_eval_batches, len(evaluator.eval_loader)) eval_batches = islice(eval_batches, num_eval_batches) for eval_step, eval_batch in enumerate(eval_batches): self.eval_step(eval_batch, evaluator) # Log to console. if eval_step + 1 == num_eval_batches or (eval_step + 1) % self.cfg.console_log_interval == 0: log.info(f"[eval_step={eval_step + 1}/{num_eval_batches}]") # Get final metrics. metrics = evaluator.compute_metrics() eval_metrics.update(metrics) self.log_metrics_to_console(f"{evaluator.label} 1/{self.matmng.current_factor}", metrics) self.matmng.current_factor *= 2 for m in metrics: eval_metrics.pop(m, None) else: # Reset metrics. evaluator.reset_metrics() # Initialize data loader iterator. eval_batches = iter(evaluator.eval_loader) # Adjust how many batches to evaluate on. num_eval_batches = ( evaluator.subset_num_batches if evaluator.subset_num_batches is not None else self.cfg.eval_subset_num_batches ) if num_eval_batches > 0: num_eval_batches = min(num_eval_batches, len(evaluator.eval_loader)) eval_batches = islice(eval_batches, num_eval_batches) for eval_step, eval_batch in enumerate(eval_batches): self.eval_step(eval_batch, evaluator) # Log to console. if eval_step + 1 == num_eval_batches or (eval_step + 1) % self.cfg.console_log_interval == 0: log.info(f"[eval_step={eval_step + 1}/{num_eval_batches}]") # Get final metrics. metrics = evaluator.compute_metrics() eval_metrics.update(metrics) self.log_metrics_to_console(f"{evaluator.label}", metrics) del eval_batches return eval_metrics def fit(self): start_time = time.time() if self.cfg.load_path is not None and self.global_step > 0 and self.cfg.eval_on_load: eval_metrics = self.eval() if wandb.run is not None: wandb.log(eval_metrics, step=self.global_step) # Set model to 'train' mode. self.fsdp_model.train() # Initialize monitors. assert self.cfg.device_train_batch_size is not None speed_monitor = SpeedMonitor(self.cfg.speed_monitor) lr_monitor = LRMonitor(self.optim) # Log system metrics at the start of training. sys_metrics = self.system_metrics() if sys_metrics: self.log_metrics_to_console("Pre-train system metrics", sys_metrics) if wandb.run is not None: wandb.log(sys_metrics, step=0) # Train. first_batch: bool = True for batch in self.train_loader: # Bookkeeping. # NOTE: To track the global batch size / number of tokens per batch we make the assumption that all # batches see the same number of tokens, which should be the case for language model pre-training # (at least when drop_last=True). # Alternatively we'd have to use a distributed all reduce over seq_len here, but I don't want that overhead. 
# So for now I'm putting these assertions here so if the assumption is violated it will fail loudly. batch_size, seq_len = batch["input_ids"].shape assert seq_len == self.cfg.model.max_sequence_length assert batch_size == self.cfg.device_train_batch_size
from __future__ import annotations __all__ = ["SpeedMonitor", "LRMonitor", "Trainer"] log = logging.getLogger(__name__) @dataclass class SpeedMonitor: cfg: SpeedMonitorConfig start_times: Deque[float] = field(default_factory=lambda: deque([])) global_total_tokens: int = 0 device_interval_tokens: Deque[int] = field(default_factory=lambda: deque([])) def batch_start(self, global_total_tokens: int, device_batch_num_tokens: int, record: bool = True) -> None: self.global_total_tokens = global_total_tokens if record: if len(self.start_times) >= self.cfg.window_size: self.start_times.popleft() self.device_interval_tokens.popleft() self.start_times.append(time.monotonic()) self.device_interval_tokens.append(device_batch_num_tokens) def reset(self) -> None: self.start_times.clear() self.device_interval_tokens.clear() def check(self) -> Dict[str, float]: metrics: Dict[str, float] = {"throughput/total_tokens": self.global_total_tokens} if self.start_times: interval_seconds = time.monotonic() - self.start_times[0] interval_batches = len(self.start_times) interval_tokens = sum(self.device_interval_tokens) metrics["throughput/device/tokens_per_second"] = interval_tokens / interval_seconds metrics["throughput/device/batches_per_second"] = interval_batches / interval_seconds return metrics @dataclass class LRMonitor: optim: torch.optim.Optimizer def check(self) -> Dict[str, float]: lrs = [group["lr"] for group in self.optim.param_groups] return {f"optim/learning_rate_group{idx}": lr for idx, lr in enumerate(lrs)} @dataclass class Trainer: cfg: TrainConfig model: Olmo fsdp_model: FSDP optim: torch.optim.Optimizer scheduler: torch.optim.lr_scheduler.LRScheduler train_loader: DataLoader device: torch.device evaluators: List[Evaluator] ce_train_loss_metric: MeanMetric z_train_loss_metric: Optional[MeanMetric] = None global_step: int = 0 global_data_step: int = 0 """This is now redundant since adding 'global_train_examples_seen'.""" global_train_examples_seen: int = 0 """Tracks the global number of training examples seen for the purpose of restoring the dataset position on restarts.""" global_train_tokens_seen: int = 0 """Tracks the global total number of tokens trained on.""" checkpoints: List[Path] = field(default_factory=list) unsharded_checkpoints: List[Path] = field(default_factory=list) min_train_loss: float = float("inf") indices_file: Optional[TextIO] = None def state_dict(self) -> Dict[str, Any]: state_dict = self.non_tensor_state_dict() state_dict["model"] = self.fsdp_model.state_dict() state_dict["optim"] = FSDP.optim_state_dict(self.fsdp_model, self.optim) return state_dict def non_tensor_state_dict(self) -> Dict[str, Any]: return { "scheduler": self.scheduler.state_dict(), "global_step": self.global_step, "global_data_step": self.global_data_step, "global_train_examples_seen": self.global_train_examples_seen, "global_train_tokens_seen": self.global_train_tokens_seen, "checkpoints": self.checkpoints, "unsharded_checkpoints": self.unsharded_checkpoints, "rng": { "python": random.getstate(), "numpy": np.random.get_state(), "torch": torch.random.get_rng_state(), "cuda": torch.cuda.get_rng_state(), }, } def load_non_tensor_state_dict(self, state_dict: Dict[str, Any]) -> None: # Checkpoint paths. 
self.checkpoints = [ path for path in state_dict["checkpoints"] if path.is_dir() and path.resolve().parent == Path(self.cfg.save_folder).resolve() ] self.unsharded_checkpoints = [ path for path in state_dict["unsharded_checkpoints"] if path.is_dir() and path.resolve().parent == Path(self.cfg.save_folder).resolve() ] # Learning rate scheduler. self.scheduler.load_state_dict(state_dict["scheduler"]) # Dataset / dataloader position. self.global_step = state_dict["global_step"] self.global_data_step = state_dict["global_data_step"] self.global_train_examples_seen = state_dict.get( # newer addition "global_train_examples_seen", self.global_data_step * self.cfg.global_train_batch_size ) self.global_train_tokens_seen = state_dict.get( # newer addition "global_train_tokens_seen", self.global_data_step * self.cfg.global_train_batch_size * self.cfg.model.max_sequence_length, ) if not self.cfg.restore_dataloader: self.global_data_step = 0 self.global_train_examples_seen = 0 self.global_train_tokens_seen = 0 elif self.cfg.fast_forward_batches: self.global_data_step += self.cfg.fast_forward_batches # Technically we don't "see" these batches that we fast-forward through, but we use # this variable to update the position of the dataset so we need to include them here. self.global_train_examples_seen += self.cfg.fast_forward_batches * self.cfg.global_train_batch_size # NOTE: on the other hand we don't add anything to 'self.global_train_tokens_seen' here because # that variable is meant to track the actual number of tokens trained on. if self.global_data_step > 0: if self.global_data_step > self.global_step: log.info( f"Fast-forwarding data loader to step {self.global_step:,d}+{self.global_data_step-self.global_step:,d} " f"({self.global_train_examples_seen:,d} examples)" ) else: log.info( f"Fast-forwarding data loader to step {self.global_data_step:,d} " f"({self.global_train_examples_seen:,d} examples)" ) assert isinstance(self.train_loader.dataset, IterableDataset) self.train_loader.dataset.start_index = self.global_train_examples_seen if not self.cfg.restore_base_learning_rate: # Reset base learning rate to the value in the config, not the checkpoint. set_new_base_lr(self.optim, self.scheduler, self.cfg.optimizer.learning_rate) # RNG states. if "rng" in state_dict: rng_state = state_dict["rng"] self.restore_rng_state(rng_state) def restore_rng_state(self, rng_state: Dict[str, Any]) -> None: random.setstate(rng_state["python"]) np.random.set_state(rng_state["numpy"]) torch.set_rng_state(rng_state["torch"]) torch.cuda.set_rng_state(rng_state["cuda"]) def save_sharded_checkpoint(self) -> Path: checkpoint_dir = Path(self.cfg.save_folder) / f"step{self.global_step}" checkpoint_dir_tmp = Path(self.cfg.save_folder) / f"step{self.global_step}-tmp" try: next(checkpoint_dir.glob("*")) if self.cfg.save_overwrite: if get_global_rank() == 0: shutil.rmtree(checkpoint_dir) else: raise OlmoConfigurationError( f"Checkpoint for step {self.global_step} already exists, use --save-overwrite to overwrite it" ) except StopIteration: pass if get_global_rank() == 0: checkpoint_dir_tmp.mkdir(parents=True, exist_ok=True) self.checkpoints.append(checkpoint_dir) barrier() # Flush data indices file. if self.indices_file is not None: self.indices_file.flush() # Write the checkpoint. 
with FSDP.state_dict_type( self.fsdp_model, state_dict_type=StateDictType.SHARDED_STATE_DICT, state_dict_config=ShardedStateDictConfig(offload_to_cpu=True), optim_state_dict_config=ShardedOptimStateDictConfig(offload_to_cpu=True), ): # NOTE: Alternatively we could use the checkpointing method in this test # https://github.com/pytorch/pytorch/blob/main/test/distributed/checkpoint/test_fsdp_optim_state.py # but we've had issues with that on AMD GPUs. See # https://github.com/pytorch/pytorch/issues/100041 # checkpoint.save_state_dict(self.state_dict(), checkpoint.FileSystemWriter(checkpoint_dir)) torch.save(self.state_dict(), checkpoint_dir_tmp / f"rank{get_global_rank()}.pt") # Save config too. if get_global_rank() == 0: self.cfg.save(checkpoint_dir_tmp / "config.yaml") barrier() if get_global_rank() == 0: # Replace temp directory with target checkpoint directory. checkpoint_dir_tmp.replace(checkpoint_dir) # Link to 'latest'. latest_path = Path(self.cfg.save_folder) / "latest" latest_path.unlink(missing_ok=True) latest_path.symlink_to(checkpoint_dir.name, target_is_directory=True) # In the cases where we're using a shared NFS drive between ranks to save checkpoints, # replacing the temp directory with the final directory from rank 0 might not be immediately # realized in the file systems of the other ranks. # So we wait here across all ranks until that final checkpoint directory is visible. wait_on(lambda: checkpoint_dir.exists(), "Waiting for checkpoint directory", timeout=10.0) # Remove old checkpoints. if self.cfg.save_num_checkpoints_to_keep > 0: while len(self.checkpoints) > self.cfg.save_num_checkpoints_to_keep: self.remove_sharded_checkpoint(0) barrier() # Upload checkpoint to bucket. if self.cfg.remote_save_folder is not None: files_to_upload = [f"rank{get_global_rank()}.pt"] if get_global_rank() == 0: files_to_upload.append("config.yaml") for fname in files_to_upload: source = checkpoint_dir / fname target = f"{self.cfg.remote_save_folder}/{checkpoint_dir.name}/{fname}" log.info(f"Uploading {source} to {target}...") upload(source, target, save_overwrite=self.cfg.save_overwrite) barrier() return checkpoint_dir def remove_sharded_checkpoint(self, idx: int = 0): oldest_checkpoint = self.checkpoints.pop(idx) barrier() if get_global_rank() == 0 and oldest_checkpoint.is_dir(): shutil.rmtree(oldest_checkpoint, ignore_errors=True) latest_path = Path(self.cfg.save_folder) / "latest" if latest_path.resolve() == oldest_checkpoint.resolve(): latest_path.unlink() barrier() def restore_sharded_checkpoint(self, load_path: PathOrStr): # Zero-gradients to avoid gathering them. self.optim.zero_grad(set_to_none=True) with FSDP.state_dict_type( self.fsdp_model, state_dict_type=StateDictType.SHARDED_STATE_DICT, state_dict_config=ShardedStateDictConfig(offload_to_cpu=True), optim_state_dict_config=ShardedOptimStateDictConfig(offload_to_cpu=True), ): # NOTE: Alternatively we could use the checkpointing method in this test # https://github.com/pytorch/pytorch/blob/main/test/distributed/checkpoint/test_fsdp_optim_state.py # but we've had issues with that on AMD GPUs. See # https://github.com/pytorch/pytorch/issues/100041 # But basically it would look like this. # Load the serialized state dict in place. # state_dict = self.state_dict() # del state_dict["optim"] # Can't load optimizer together with the model # checkpoint.load_state_dict(state_dict, checkpoint.FileSystemReader(load_path)) # self.fsdp_model.load_state_dict(state_dict["model"]) # Load other state... # Load optim state. 
# optim_state = load_sharded_optimizer_state_dict( # model_state_dict=state_dict["model"], # optimizer_key="optim", # storage_reader=checkpoint.FileSystemReader(load_path), # ) # flattened_osd = FSDP.optim_state_dict_to_load(optim_state["optim"], self.fsdp_model, self.optim) # self.optim.load_state_dict(flattened_osd) # Deserialize state dictionary. state_dict = torch.load(resource_path(load_path, f"rank{get_global_rank()}.pt")) # Load model and optimizer state. log.info("Loading model state...") self.fsdp_model.load_state_dict(state_dict["model"]) log.info("Loading optimizer state...") # NOTE: careful, the order of these arguments has changed since the 2.0 release. if version.parse(torch.__version__) < version.parse("2.1.0"): # flattened_osd = FSDP.optim_state_dict_to_load(optim_state["optim"], self.fsdp_model, self.optim) # type: ignore flattened_osd = FSDP.optim_state_dict_to_load(state_dict["optim"], self.fsdp_model, self.optim) # type: ignore else: # flattened_osd = FSDP.optim_state_dict_to_load(self.fsdp_model, self.optim, optim_state["optim"]) # type: ignore flattened_osd = FSDP.optim_state_dict_to_load(self.fsdp_model, self.optim, state_dict["optim"]) # type: ignore self.optim.load_state_dict(flattened_osd) # Load non-tensor state. self.load_non_tensor_state_dict(state_dict) del state_dict, flattened_osd barrier() def save_unsharded_checkpoint(self) -> Path: # Zero-gradients to avoid gathering them. self.optim.zero_grad(set_to_none=True) checkpoint_dir = Path(self.cfg.save_folder) / f"step{self.global_step}-unsharded" checkpoint_dir_tmp = Path(self.cfg.save_folder) / f"step{self.global_step}-unsharded-tmp" try: next(checkpoint_dir.glob("*")) if self.cfg.save_overwrite: if get_global_rank() == 0: shutil.rmtree(checkpoint_dir) else: raise OlmoConfigurationError( f"Unsharded checkpoint for step {self.global_step} already exists, use --save-overwrite to overwrite it" ) except StopIteration: pass if get_global_rank() == 0: checkpoint_dir_tmp.mkdir(parents=True, exist_ok=True) self.unsharded_checkpoints.append(checkpoint_dir) barrier() # Flush data indices file. if self.indices_file is not None: self.indices_file.flush() # Write the checkpoint. with FSDP.state_dict_type( self.fsdp_model, state_dict_type=StateDictType.FULL_STATE_DICT, state_dict_config=FullStateDictConfig(rank0_only=True, offload_to_cpu=True), optim_state_dict_config=FullOptimStateDictConfig(rank0_only=True, offload_to_cpu=True), ): # We'll write the model and optimizer state dicts individually to reduce (CPU) memory consumption. # First the model state. model_state_dict = self.fsdp_model.state_dict() if get_global_rank() == 0: torch.save(model_state_dict, checkpoint_dir_tmp / "model.pt") del model_state_dict # Then the optimizer state. optim_state_dict = FSDP.optim_state_dict(self.fsdp_model, self.optim) if get_global_rank() == 0: torch.save(optim_state_dict, checkpoint_dir_tmp / "optim.pt") del optim_state_dict # Then everything else. other_state_dict = self.non_tensor_state_dict() if get_global_rank() == 0: torch.save(other_state_dict, checkpoint_dir_tmp / "other.pt") self.cfg.save(checkpoint_dir_tmp / "config.yaml") barrier() if get_global_rank() == 0: # Replace temp directory with target checkpoint directory. checkpoint_dir_tmp.replace(checkpoint_dir) # Link to 'latest'. 
latest_path = Path(self.cfg.save_folder) / "latest-unsharded" latest_path.unlink(missing_ok=True) latest_path.symlink_to(checkpoint_dir.name, target_is_directory=True) # In the cases where we're using a shared NFS drive between ranks to save checkpoints, # replacing the temp directory with the final directory from rank 0 might not be immediately # realized in the file systems of the other ranks. # So we wait here across all ranks until that final checkpoint directory is visible. wait_on(lambda: checkpoint_dir.exists(), "Waiting for checkpoint directory", timeout=10.0) # Remove old checkpoints. if self.cfg.save_num_unsharded_checkpoints_to_keep > 0: while len(self.unsharded_checkpoints) > self.cfg.save_num_unsharded_checkpoints_to_keep: self.remove_unsharded_checkpoint(0) barrier() # Upload checkpoint to bucket. if self.cfg.remote_save_folder is not None: if get_global_rank() == 0: for fname in ["config.yaml", "model.pt", "optim.pt", "other.pt"]: source = checkpoint_dir / fname target = f"{self.cfg.remote_save_folder}/{checkpoint_dir.name}/{fname}" log.info(f"Uploading {source} to {target}...") upload(source, target, save_overwrite=self.cfg.save_overwrite) barrier() return checkpoint_dir def remove_unsharded_checkpoint(self, idx: int = 0): barrier() oldest_checkpoint = self.unsharded_checkpoints.pop(idx) if get_global_rank() == 0 and oldest_checkpoint.is_dir(): shutil.rmtree(oldest_checkpoint, ignore_errors=True) latest_path = Path(self.cfg.save_folder) / "latest-unsharded" if latest_path.resolve() == oldest_checkpoint.resolve(): latest_path.unlink() barrier() def restore_unsharded_checkpoint(self, load_path: PathOrStr): # Zero-gradients to avoid gathering them. self.optim.zero_grad(set_to_none=True) with FSDP.state_dict_type( self.fsdp_model, state_dict_type=StateDictType.FULL_STATE_DICT, state_dict_config=FullStateDictConfig(rank0_only=True, offload_to_cpu=True), optim_state_dict_config=FullOptimStateDictConfig(rank0_only=True, offload_to_cpu=True), ): # Load model state. log.info("Loading model state...") self.fsdp_model.load_state_dict(torch.load(resource_path(load_path, "model.pt"))) # Load optimizer state. log.info("Loading optimizer state...") optim_state_dict = torch.load(resource_path(load_path, "optim.pt")) # NOTE: careful, the order of these arguments has changed since the 2.0 release. if version.parse(torch.__version__) < version.parse("2.1.0"): # flattened_osd = FSDP.optim_state_dict_to_load(optim_state["optim"], self.fsdp_model, self.optim) # type: ignore flattened_osd = FSDP.optim_state_dict_to_load(optim_state_dict, self.fsdp_model, self.optim) # type: ignore else: # flattened_osd = FSDP.optim_state_dict_to_load(self.fsdp_model, self.optim, optim_state["optim"]) # type: ignore flattened_osd = FSDP.optim_state_dict_to_load(self.fsdp_model, self.optim, optim_state_dict) # type: ignore del optim_state_dict self.optim.load_state_dict(flattened_osd) del flattened_osd # Load other state. 
other_state_dict = torch.load(resource_path(load_path, "other.pt")) self.load_non_tensor_state_dict(other_state_dict) barrier() def save_checkpoint(self, checkpoint_type: CheckpointType = CheckpointType.sharded) -> Path: if checkpoint_type == CheckpointType.sharded: return self.save_sharded_checkpoint() elif checkpoint_type == CheckpointType.unsharded: return self.save_unsharded_checkpoint() else: raise NotImplementedError(checkpoint_type) def restore_checkpoint(self, load_path: PathOrStr, checkpoint_type: Optional[CheckpointType] = None): if checkpoint_type == CheckpointType.unsharded or ( checkpoint_type is None and str(load_path).endswith("-unsharded") ): self.restore_unsharded_checkpoint(load_path) elif checkpoint_type == CheckpointType.sharded or checkpoint_type is None: self.restore_sharded_checkpoint(load_path) elif checkpoint_type is not None: raise NotImplementedError(checkpoint_type) def remove_checkpoint(self, idx: int = 0, checkpoint_type: CheckpointType = CheckpointType.sharded): if checkpoint_type == CheckpointType.sharded: self.remove_sharded_checkpoint(idx=idx) elif checkpoint_type == CheckpointType.unsharded: self.remove_unsharded_checkpoint(idx=idx) else: raise NotImplementedError(checkpoint_type) def get_labels(self, batch: Dict[str, Any]) -> torch.Tensor: # Labels are just input IDs shifted to the left (first item is ignored). labels, attention_mask = batch["input_ids"], batch.get("attention_mask") if attention_mask is not None: labels = labels.masked_fill(attention_mask == 0.0, -100) return labels[..., 1:].contiguous() def model_forward( self, batch: Dict[str, Any], loss_reduction: str = "mean" ) -> Tuple[torch.Tensor, torch.Tensor]: # shape: (batch_size, seq_len, vocab_size) logits = self.fsdp_model( input_ids=batch["input_ids"], attention_mask=batch.get("attention_mask"), attention_bias=batch.get("attention_bias"), ).logits logits_for_loss = logits[..., :-1, :].contiguous() # shape: (batch_size * seq_len, vocab_size) logits_for_loss = logits_for_loss.view(-1, logits_for_loss.size(-1)) # shape: (batch_size, seq_len) labels = self.get_labels(batch) # shape: (batch_size * seq_len,) labels = labels.view(-1) ce_loss = F.cross_entropy(logits_for_loss, labels, ignore_index=-100, reduction=loss_reduction) if loss_reduction == "none": # Reshape (batch_size * seq_len,) -> (batch_size, seq_len) ce_loss = ce_loss.view(batch["input_ids"].shape[0], -1) return ce_loss, logits def train_batch(self, batch: Dict[str, Any]) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: # Split into micro-batches. micro_batches = self.split_batch(batch) # In case this helps with memory utilization. del batch ce_batch_loss = torch.tensor(0.0, device=self.device) z_batch_loss = None if not self.cfg.softmax_auxiliary_loss else torch.tensor(0.0, device=self.device) for micro_batch in micro_batches: with torch.autocast("cuda", enabled=True, dtype=self.cfg.autocast_precision): # Run forward pass. ce_loss, logits = self.model_forward(micro_batch) ce_loss = ce_loss / len(micro_batches) # In case this helps with memory utilization. del micro_batch # Update overall CE batch loss. ce_batch_loss += ce_loss.detach() # Get loss to optimize for. if self.cfg.softmax_auxiliary_loss: z_squared = logits.logsumexp(-1).pow(2).mean() z_loss = 1e-4 * z_squared / len(micro_batches) loss = ce_loss + z_loss # Update overall Z batch loss. z_batch_loss += z_loss.detach() else: loss = ce_loss del logits # Check for nan. if torch.isnan(loss): raise ValueError("nan loss encountered") # Run backward pass. 
loss.backward() return ce_batch_loss, z_batch_loss def train_step(self, batch: Dict[str, Any]) -> Dict[str, float]: # Write data-indices to file. if self.indices_file is not None and "index" in batch: indices = "\t".join(str(int(i)) for i in batch["index"]) self.indices_file.write(f"{self.global_step}\t{indices}\n") # Zero-gradients. self.optim.zero_grad(set_to_none=True) # Reset metrics. self.ce_train_loss_metric.reset() if self.z_train_loss_metric is not None: self.z_train_loss_metric.reset() # Move tensors to the right device. batch = move_to_device(batch, self.device) # Run forward-backward pass. losses = [] if self.cfg.matformer_factor > 1: self.matmng = MatformerManager.get_instance() self.matmng.current_factor = 1 assert self.cfg.matformer_factor % 2 == 0 iters = int(math.log2(self.cfg.matformer_factor)) + 1 for i in range(iters): # Reduce loss metrics across ranks. ce_batch_loss, z_batch_loss = self.train_batch(batch) self.ce_train_loss_metric.update(ce_batch_loss) ce_batch_loss = self.ce_train_loss_metric.compute() losses.append((ce_batch_loss, z_batch_loss)) self.matmng.current_factor *= 2 else: ce_batch_loss, z_batch_loss = self.train_batch(batch) # Reduce loss metrics across ranks. self.ce_train_loss_metric.update(ce_batch_loss) ce_batch_loss = self.ce_train_loss_metric.compute() losses.append((ce_batch_loss, z_batch_loss)) # Clip gradient norms. grad_norm: Optional[float] = None if self.cfg.max_grad_norm is not None: grad_norm = self.fsdp_model.clip_grad_norm_(self.cfg.max_grad_norm).item() # Optimizer step. self.optim.step() self.scheduler.step() if len(losses) > 1: metrics = {} for i in range(len(losses)): factor = 2**i metrics[f'train/CrossEntropyLoss 1/{factor}'] = losses[i][0].item() metrics[f'train/Perplexity 1/{factor}'] = (2**(losses[i][0])).item() if z_batch_loss is not None and self.z_train_loss_metric is not None: self.z_train_loss_metric.update(z_batch_loss) z_batch_loss = self.z_train_loss_metric.compute() metrics[f"train/ZLoss 1/{factor}"] = z_batch_loss.item() else: metrics = { "train/CrossEntropyLoss": ce_batch_loss.item(), "train/Perplexity": torch.exp(ce_batch_loss).item(), } if z_batch_loss is not None and self.z_train_loss_metric is not None: self.z_train_loss_metric.update(z_batch_loss) z_batch_loss = self.z_train_loss_metric.compute() metrics["train/ZLoss"] = z_batch_loss.item() if grad_norm is not None: metrics["optim/grad_norm"] = grad_norm # Update min train loss and see if we should stop early. self.min_train_loss = min(self.min_train_loss, ce_batch_loss.item()) # type: ignore if ( self.cfg.early_stopping_factor is not None and self.global_step > self.cfg.scheduler.t_warmup and ce_batch_loss.item() > self.cfg.early_stopping_factor * self.min_train_loss ): raise ValueError("Stopping early because train loss has increased substantially") return metrics def eval_batch(self, batch: Dict[str, Any]) -> Tuple[torch.Tensor, torch.Tensor]: with torch.autocast("cuda", enabled=True, dtype=self.cfg.autocast_precision): ce_loss, logits = self.model_forward(batch, loss_reduction="none") return ce_loss.mean(dim=-1), logits def eval_step(self, batch: Dict[str, Any], evaluator: Evaluator) -> None: # Move tensors to the right device. batch = move_to_device(batch, self.device) # Run forward pass. with torch.no_grad(): # NOTE: 'torch.inference_mode()' doesn't work with 'torch.compile()'. ce_loss, logits = self.eval_batch(batch) # Update metrics. 
evaluator.update_metrics( batch, ce_loss, logits ) # batch includes all keys that the downstream evaluation needs barrier() def split_batch(self, batch: Dict[str, Any]) -> List[Dict[str, Any]]: microbatch_size = self.cfg.device_train_microbatch_size batch_size = batch["input_ids"].shape[0] if batch_size <= microbatch_size: return [batch] else: micro_batches = {} for key, value in batch.items(): if isinstance(value, torch.Tensor): micro_batches[key] = value.split(microbatch_size, dim=0) elif isinstance(value, list): micro_batches[key] = [ value[microbatch_size * i : microbatch_size * i + microbatch_size] for i in range(math.ceil(batch_size / microbatch_size)) ] else: raise ValueError(f"unexpected item in batch: '{key}={value}'") return [ {key: value[i] for key, value in micro_batches.items()} # type: ignore for i in range(len(micro_batches["input_ids"])) ] def system_metrics(self) -> Dict[str, float]: metrics = {} peak_gpu_mb = peak_gpu_memory() if peak_gpu_mb is not None: metrics["System/Peak GPU Memory (MB)"] = peak_gpu_mb return metrics def log_metrics_to_console(self, prefix: str, metrics: Dict[str, float]): def format_float(value: float) -> str: if value < 0.0001: return str(value) # scientific notation elif value > 1000: return f"{int(value):,d}" elif value > 100: return f"{value:.1f}" elif value > 10: return f"{value:.2f}" elif value > 1: return f"{value:.3f}" else: return f"{value:.4f}" log.info( f"{prefix}\n" + "\n".join([f" {name}={format_float(value)}" for name, value in metrics.items()]) ) def should_log_this_step(self) -> bool: if self.global_step % self.cfg.console_log_interval == 0: return True elif self.cfg.wandb is not None and self.global_step % self.cfg.wandb.log_interval == 0: return True else: return False def eval(self) -> Dict[str, Any]: # Zero gradients and set model to 'eval' mode. self.optim.zero_grad(set_to_none=True) self.fsdp_model.eval() eval_metrics = {} for evaluator in self.evaluators: log.info(f"Running evaluation for '{evaluator.label}'...") # Run model over batches. if self.cfg.matformer_factor > 1: self.matmng = MatformerManager.get_instance() self.matmng.current_factor = 1 assert self.cfg.matformer_factor % 2 == 0 iters = int(math.log2(self.cfg.matformer_factor)) + 1 for i in range(iters): # Reset metrics. evaluator.reset_metrics() # Initialize data loader iterator. eval_batches = iter(evaluator.eval_loader) # Adjust how many batches to evaluate on. num_eval_batches = ( evaluator.subset_num_batches if evaluator.subset_num_batches is not None else self.cfg.eval_subset_num_batches ) if num_eval_batches > 0: num_eval_batches = min(num_eval_batches, len(evaluator.eval_loader)) eval_batches = islice(eval_batches, num_eval_batches) for eval_step, eval_batch in enumerate(eval_batches): self.eval_step(eval_batch, evaluator) # Log to console. if eval_step + 1 == num_eval_batches or (eval_step + 1) % self.cfg.console_log_interval == 0: log.info(f"[eval_step={eval_step + 1}/{num_eval_batches}]") # Get final metrics. metrics = evaluator.compute_metrics() eval_metrics.update(metrics) self.log_metrics_to_console(f"{evaluator.label} 1/{self.matmng.current_factor}", metrics) self.matmng.current_factor *= 2 for m in metrics: eval_metrics.pop(m, None) else: # Reset metrics. evaluator.reset_metrics() # Initialize data loader iterator. eval_batches = iter(evaluator.eval_loader) # Adjust how many batches to evaluate on. 
num_eval_batches = ( evaluator.subset_num_batches if evaluator.subset_num_batches is not None else self.cfg.eval_subset_num_batches ) if num_eval_batches > 0: num_eval_batches = min(num_eval_batches, len(evaluator.eval_loader)) eval_batches = islice(eval_batches, num_eval_batches) for eval_step, eval_batch in enumerate(eval_batches): self.eval_step(eval_batch, evaluator) # Log to console. if eval_step + 1 == num_eval_batches or (eval_step + 1) % self.cfg.console_log_interval == 0: log.info(f"[eval_step={eval_step + 1}/{num_eval_batches}]") # Get final metrics. metrics = evaluator.compute_metrics() eval_metrics.update(metrics) self.log_metrics_to_console(f"{evaluator.label}", metrics) del eval_batches return eval_metrics def fit(self): start_time = time.time() if self.cfg.load_path is not None and self.global_step > 0 and self.cfg.eval_on_load: eval_metrics = self.eval() if wandb.run is not None: wandb.log(eval_metrics, step=self.global_step) # Set model to 'train' mode. self.fsdp_model.train() # Initialize monitors. assert self.cfg.device_train_batch_size is not None speed_monitor = SpeedMonitor(self.cfg.speed_monitor) lr_monitor = LRMonitor(self.optim) # Log system metrics at the start of training. sys_metrics = self.system_metrics() if sys_metrics: self.log_metrics_to_console("Pre-train system metrics", sys_metrics) if wandb.run is not None: wandb.log(sys_metrics, step=0) # Train. first_batch: bool = True for batch in self.train_loader: # Bookkeeping. # NOTE: To track the global batch size / number of tokens per batch we make the assumption that all # batches see the same number of tokens, which should be the case for language model pre-training # (at least when drop_last=True). # Alternatively we'd have to use a distributed all reduce over seq_len here, but I don't want that overhead. # So for now I'm putting these assertions here so if the assumption is violated it will fail loudly. batch_size, seq_len = batch["input_ids"].shape assert seq_len == self.cfg.model.max_sequence_length assert batch_size == self.cfg.device_train_batch_size
global_batch_size = batch_size * get_world_size() # assumes batch size equal across ranks
12
2023-11-14 02:24:07+00:00
16k
1in-oos/ccplus
caringcaribou/tests/test_module_uds.py
[ { "identifier": "Constants", "path": "caringcaribou/utils/iso14229_1.py", "snippet": "class Constants(object):\n # NR_SI (Negative Response Service Identifier) is a bit special, since\n # it is not a service per se.\n # From ISO-14229-1 specification: \"The NR_SI value is co-ordinated with\n # the SI values. The NR_SI value is not used as a SI value in order to\n # make A_Data coding and decoding easier.\"\n NR_SI = 0x7F" }, { "identifier": "Iso14229_1", "path": "caringcaribou/utils/iso14229_1.py", "snippet": "class Iso14229_1(object):\n P3_CLIENT = 5\n\n def __init__(self, tp):\n self.tp = tp\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n pass\n\n @staticmethod\n def get_service_response_id(request_id):\n \"\"\"\n Returns the service response ID for the given request ID\n\n :param request_id: Request service ID\n :return: Corresponding response service ID\n \"\"\"\n return request_id + 0x40\n\n @staticmethod\n def get_service_request_id(response_id):\n \"\"\"\n Returns the service request ID for the given response ID\n\n :param response_id: Response service ID\n :return: Corresponding request service ID\n \"\"\"\n return response_id - 0x40\n\n def send_request(self, data):\n \"\"\"\n Sends a request message containing 'data' through the underlying\n TP layer\n\n :param data: The data to send\n :return: None\n \"\"\"\n return self.tp.send_request(data)\n\n def send_response(self, data):\n \"\"\"\n Sends a response message containing 'data' through the underlying\n TP layer\n\n :param data: The data to send\n :return: None\n \"\"\"\n return self.tp.send_response(data)\n\n def receive_response(self, wait_window):\n \"\"\"\n Attempts to receive a response through the underlying TP layer\n\n :param wait_window: Minimum time (in seconds) to wait before timeout\n :return: The received response if successful,\n None otherwise\n \"\"\"\n start_time = time.process_time()\n while True:\n current_time = time.process_time()\n if (current_time - start_time) > wait_window:\n return None\n\n response = self.tp.indication(wait_window)\n NRC = NegativeResponseCodes\n NRC_RCRRP = NRC.REQUEST_CORRECTLY_RECEIVED_RESPONSE_PENDING\n if response is not None and len(response) >= 3:\n if (response[0] == Constants.NR_SI and\n response[2] == NRC_RCRRP):\n continue\n break\n return response\n\n @staticmethod\n def is_positive_response(response):\n \"\"\"\n Returns a bool indicating whether 'response' is positive\n\n :param response: ISO-14229-1 response data\n :return: False if response is a NEGATIVE_RESPONSE,\n True otherwise\n \"\"\"\n if (response is not None and\n len(response) > 0 and\n response[0] != Constants.NR_SI):\n return True\n return False\n\n def read_data_by_identifier(self, identifier):\n \"\"\"\n Sends a \"read data by identifier\" request for 'identifier'\n\n :param identifier: Data identifier\n :return: Response data if successful,\n None otherwise\n \"\"\"\n response = []\n num_dids = len(identifier)\n if num_dids > 0:\n request = [0] * ((num_dids * 2) + 1)\n request[0] = ServiceID.READ_DATA_BY_IDENTIFIER\n for i in range(0, num_dids):\n request[i * 2 + 1] = (identifier[i] >> 8) & 0xFF\n request[i * 2 + 2] = identifier[i] & 0xFF\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n return response\n\n def read_memory_by_address(self, address_and_length_format,\n memory_address, memory_size):\n \"\"\"\n Sends a \"read memory by address\" request for 'memory_address'\n\n :param address_and_length_format: Address and length 
format\n :param memory_address: Memory address\n :param memory_size: Memory size\n :return: Response data if successful,\n None otherwise\n \"\"\"\n addr_sz_fmt = (address_and_length_format >> 4) & 0xF\n data_sz_fmt = (address_and_length_format & 0xF)\n\n request = [0] * (1 + 1 + addr_sz_fmt + data_sz_fmt)\n request[0] = ServiceID.READ_MEMORY_BY_ADDRESS\n request[1] = address_and_length_format\n offset = 2\n for i in (range(0, addr_sz_fmt)):\n request[addr_sz_fmt + offset - i - 1] = (memory_address & 0xFF)\n memory_address = (memory_address >> 8)\n\n offset += addr_sz_fmt\n\n for i in (range(0, data_sz_fmt)):\n request[data_sz_fmt + offset - i - 1] = (memory_size & 0xFF)\n memory_size = (memory_size >> 8)\n\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n\n return response\n\n def write_memory_by_address(self, address_and_length_format,\n memory_address, memory_size, data):\n \"\"\"\n Sends a \"write memory by address\" request to write 'data' to\n 'memory_address'\n\n :param address_and_length_format: Address and length format\n :param memory_address: Memory address\n :param memory_size: Memory size\n :param data: The data to write to 'memory_address'\n :return: Response data if successful,\n None otherwise\n \"\"\"\n addr_sz_fmt = (address_and_length_format >> 4) & 0xF\n data_sz_fmt = (address_and_length_format & 0xF)\n\n request = [0] * (1 + 1 + addr_sz_fmt + data_sz_fmt)\n request[0] = ServiceID.WRITE_MEMORY_BY_ADDRESS\n request[1] = address_and_length_format\n offset = 2\n for i in (range(0, addr_sz_fmt)):\n request[addr_sz_fmt + offset - i - 1] = (memory_address & 0xFF)\n memory_address = (memory_address >> 8)\n\n offset += addr_sz_fmt\n\n for i in (range(0, data_sz_fmt)):\n request[data_sz_fmt + offset - i - 1] = (memory_size & 0xFF)\n memory_size = (memory_size >> 8)\n\n request += data\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n\n return response\n\n def write_data_by_identifier(self, identifier, data):\n \"\"\"\n Sends a \"write data by identifier\" request to write 'data' to\n 'identifier'\n\n :param identifier: Data identifier\n :param data: Data to write to 'identifier'\n :return: Response data if successful,\n None otherwise\n \"\"\"\n request = [0] * (1 + 2)\n\n request[0] = ServiceID.WRITE_DATA_BY_IDENTIFIER\n request[1] = (identifier >> 8) & 0xFF\n request[2] = identifier & 0xFF\n request += data\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n\n return response\n\n def input_output_control_by_identifier(self, identifier, data):\n \"\"\"\n Sends a \"input output control by identifier\" request for 'data' to\n 'identifier'\n\n :param identifier: Data identifier\n :param data: Data\n :return: Response data if successful,\n None otherwise\n \"\"\"\n request = [0] * (1 + 2)\n\n request[0] = ServiceID.INPUT_OUTPUT_CONTROL_BY_IDENTIFIER\n request[1] = (identifier >> 8) & 0xFF\n request[2] = identifier & 0xFF\n request += data\n\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n\n return response\n\n def dynamically_define_data_identifier(self, identifier,\n sub_function, sub_function_arg):\n \"\"\"\n Sends a \"dynamically define data identifier\" request for\n 'identifier'\n\n :param identifier: DDDID to set\n :param sub_function: Sub function\n :param sub_function_arg: Sub function arguments\n :return: Response data if successful,\n None otherwise\n \"\"\"\n if (identifier is None or\n sub_function is None or\n sub_function_arg is 
None):\n return None\n\n request = [0] * (1 + 1 + 2 + len(sub_function_arg) * 4)\n request[0] = ServiceID.DYNAMICALLY_DEFINE_DATA_IDENTIFIER\n request[1] = sub_function\n request[2] = (identifier >> 8) & 0xFF\n request[3] = identifier & 0xFF\n\n offset = 4\n for did in sub_function_arg:\n request[offset + 0] = (did.sourceDataIdentifier >> 8) & 0xFF\n request[offset + 1] = did.sourceDataIdentifier & 0xFF\n request[offset + 2] = did.positionInSourceDataRecord\n request[offset + 3] = did.memorySize\n offset += 4\n\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n\n return response\n\n def diagnostic_session_control(self, session_type):\n \"\"\"\n Sends a \"DiagnosticSessionControl\" request for specified session\n type\n\n :param session_type: Indicates which kind of session should be\n requested\n :return: Response data if successful,\n None otherwise\n \"\"\"\n request = [0] * 2\n request[0] = ServiceID.DIAGNOSTIC_SESSION_CONTROL\n request[1] = session_type\n\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n\n return response\n\n def ecu_reset(self, reset_type):\n \"\"\"\n Sends an \"ECU reset\" request for specified reset type\n\n :param reset_type: Indicates which kind of reset should be requested\n :return: Response data if successful,\n None otherwise\n \"\"\"\n request = [0] * 2\n request[0] = ServiceID.ECU_RESET\n request[1] = reset_type\n\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n\n return response\n\n def security_access_request_seed(self, level, data_record=None):\n \"\"\"\n Sends a Security Access \"Request seed\" message for 'level'\n\n :param level: Security Access Type level to send request seed for\n :param data_record: Optional data to transmit when requesting seed,\n e.g. 
client identification\n :return: Response data (containing seed) if successful,\n None otherwise\n \"\"\"\n service_id = ServiceID.SECURITY_ACCESS\n request = [service_id, level]\n if data_record:\n for data_record in data_record:\n request.append(data_record)\n\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n\n return response\n\n def security_access_send_key(self, level, key):\n \"\"\"\n Sends a Security Access \"Send key\" message with 'key' for 'level'\n\n :param level: Security Access Type level to send key for\n :param key: Key to transmit\n :return: Response data if successful,\n None otherwise\n \"\"\"\n service_id = ServiceID.SECURITY_ACCESS\n request = [service_id, level]\n for key_byte in key:\n request.append(key_byte)\n\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n\n return response\n\n def read_data_by_periodic_identifier(self, transmission_mode,\n identifier):\n \"\"\"\n Sends a \"read data by periodic identifier\" request for 'identifier'\n\n :param transmission_mode: Transmission mode\n :param identifier: Identifier\n :return: Response data if successful,\n None otherwise\n \"\"\"\n if (transmission_mode is None or\n identifier is None or\n len(identifier) == 0):\n return None\n\n request = [0] * (2 + len(identifier))\n request[0] = ServiceID.READ_DATA_BY_PERIODIC_IDENTIFIER\n request[1] = transmission_mode\n\n for i in range(0, len(identifier)):\n request[2 + i] = identifier[i]\n\n self.tp.send_request(request)\n response = self.receive_response(self.P3_CLIENT)\n\n return response" }, { "identifier": "NegativeResponseCodes", "path": "caringcaribou/utils/iso14229_1.py", "snippet": "class NegativeResponseCodes(object):\n \"\"\"\n ISO-14229-1 negative response codes\n \"\"\"\n POSITIVE_RESPONSE = 0x00\n # 0x01-0x0F ISO SAE Reserved\n GENERAL_REJECT = 0x10\n SERVICE_NOT_SUPPORTED = 0x11\n SUB_FUNCTION_NOT_SUPPORTED = 0x12\n INCORRECT_MESSAGE_LENGTH_OR_INVALID_FORMAT = 0x13\n RESPONSE_TOO_LONG = 0x14\n # 0x15-0x20 ISO SAE Reserved\n BUSY_REPEAT_REQUEST = 0x21\n CONDITIONS_NOT_CORRECT = 0x22\n # 0x23 ISO SAE Reserved\n REQUEST_SEQUENCE_ERROR = 0x24\n NO_RESPONSE_FROM_SUBNET_COMPONENT = 0x25\n FAILURE_PREVENTS_EXECUTION_OF_REQUESTED_ACTION = 0x26\n # 0x27-0x30 ISO SAE Reserved\n REQUEST_OUT_OF_RANGE = 0x31\n # 0x32 ISO SAE Reserved\n SECURITY_ACCESS_DENIED = 0x33\n # 0x34 ISO SAE Reserved\n INVALID_KEY = 0x35\n EXCEEDED_NUMBER_OF_ATTEMPTS = 0x36\n REQUIRED_TIME_DELAY_NOT_EXPIRED = 0x37\n # 0x38-0x4F Reserved by extended data link security document\n # 0x50-0x6F ISO SAE Reserved\n UPLOAD_DOWNLOAD_NOT_ACCEPTED = 0x70\n TRANSFER_DATA_SUSPENDED = 0x71\n GENERAL_PROGRAMMING_FAILURE = 0x72\n WRONG_BLOCK_SEQUENCE_COUNTER = 0x73\n # 0x74-0x77 ISO SAE Reserved\n REQUEST_CORRECTLY_RECEIVED_RESPONSE_PENDING = 0x78\n # 0x79-0x7D ISO SAE Reserved\n SUB_FUNCTION_NOT_SUPPORTED_IN_ACTIVE_SESSION = 0x7E\n SERVICE_NOT_SUPPORTED_IN_ACTIVE_SESSION = 0x7F\n # 0x80 ISO SAE Reserved\n RPM_TOO_HIGH = 0x81\n RPM_TOO_LOW = 0x82\n ENGINE_IS_RUNNING = 0x83\n ENGINE_IS_NOT_RUNNING = 0x84\n ENGINE_RUN_TIME_TOO_LOW = 0x85\n TEMPERATURE_TOO_HIGH = 0x86\n TEMPERATURE_TOO_LOW = 0x87\n VEHICLE_SPEED_TOO_HIGH = 0x88\n VEHICLE_SPEED_TOO_LOW = 0x89\n THROTTLE_PEDAL_TOO_HIGH = 0x8A\n THROTTLE_PEDAL_TOO_LOW = 0x8B\n TRANSMISSION_RANGE_NOT_IN_NEUTRAL = 0x8C\n TRANSMISSION_RANGE_NOT_IN_GEAR = 0x8D\n # 0x8E ISO SAE Reserved\n BRAKE_SWITCHES_NOT_CLOSED = 0x8F\n SHIFT_LEVER_NOT_IN_PARK = 0x90\n TORQUE_CONVERTER_CLUTCH_LOCKED = 0x91\n 
VOLTAGE_TOO_HIGH = 0x92\n VOLTAGE_TOO_LOW = 0x93\n # 0x94-0xEF Reserved for specific conditions not correct\n # 0xF0-0xFE Vehicle manufacturer specific conditions not correct\n # 0xFF ISO SAE Reserved" }, { "identifier": "ServiceID", "path": "caringcaribou/utils/iso14229_1.py", "snippet": "class ServiceID(object):\n \"\"\"\n ISO-14229-1 service ID definitions\n \"\"\"\n DIAGNOSTIC_SESSION_CONTROL = 0x10\n ECU_RESET = 0x11\n CLEAR_DIAGNOSTIC_INFORMATION = 0x14\n READ_DTC_INFORMATION = 0x19\n READ_DATA_BY_IDENTIFIER = 0x22\n READ_MEMORY_BY_ADDRESS = 0x23\n READ_SCALING_DATA_BY_IDENTIFIER = 0x24\n SECURITY_ACCESS = 0x27\n COMMUNICATION_CONTROL = 0x28\n READ_DATA_BY_PERIODIC_IDENTIFIER = 0x2A\n DYNAMICALLY_DEFINE_DATA_IDENTIFIER = 0x2C\n WRITE_DATA_BY_IDENTIFIER = 0x2E\n INPUT_OUTPUT_CONTROL_BY_IDENTIFIER = 0x2F\n ROUTINE_CONTROL = 0x31\n REQUEST_DOWNLOAD = 0x34\n REQUEST_UPLOAD = 0x35\n TRANSFER_DATA = 0x36\n REQUEST_TRANSFER_EXIT = 0x37\n REQUEST_FILE_TRANSFER = 0x38\n WRITE_MEMORY_BY_ADDRESS = 0x3D\n TESTER_PRESENT = 0x3E\n ACCESS_TIMING_PARAMETER = 0x83\n SECURED_DATA_TRANSMISSION = 0x84\n CONTROL_DTC_SETTING = 0x85\n RESPONSE_ON_EVENT = 0x86\n LINK_CONTROL = 0x87" }, { "identifier": "Services", "path": "caringcaribou/utils/iso14229_1.py", "snippet": "class Services(object):\n \"\"\"Class structure containing service specific constants, sub-function\n parameters and functions\"\"\"\n\n class DiagnosticSessionControl(BaseService):\n\n service_id = ServiceID.DIAGNOSTIC_SESSION_CONTROL\n\n class DiagnosticSessionType(object):\n # 0x00 ISO SAE Reserved\n DEFAULT_SESSION = 0x01\n PROGRAMMING_SESSION = 0x02\n EXTENDED_DIAGNOSTIC_SESSION = 0x03\n SAFETY_SYSTEM_DIAGNOSTIC_SESSION = 0x04\n # 0x05-0x3F ISO SAE Reserved\n # 0x40-0x5F Vehicle manufacturer specific\n VEHICLE_MANUFACTURER_SESSION_MIN = 0x40\n VEHICLE_MANUFACTURER_SESSION_MAX = 0x5F\n # 0x60-0x7E System supplier specific\n SYSTEM_SUPPLIER_SESSION_MIN = 0x60\n SYSTEM_SUPPLIER_SESSION_MAX = 0x7E\n # 0x7F ISO SAE Reserved\n\n class EcuReset(BaseService):\n\n service_id = ServiceID.ECU_RESET\n\n class ResetType(object):\n # 0x00 ISO SAE Reserved\n HARD_RESET = 0x01\n KEY_OFF_ON_RESET = 0x02\n SOFT_RESET = 0x03\n ENABLE_RAPID_POWER_SHUTDOWN = 0x04\n DISABLE_RAPID_POWER_SHUTDOWN = 0x05\n # 0x06-0x3F ISO SAE Reserved\n # 0x40-0x5F Vehicle manufacturer specific\n # 0x60-0x7E System supplier specific\n # 0x7F ISO SAE Reserved\n\n class SecurityAccess(BaseService):\n\n service_id = ServiceID.SECURITY_ACCESS\n\n class RequestSeedOrSendKey(object):\n \"\"\"\n These are lined up so that value X \"request seed level N\" has\n a matching \"send key level N\" at value X+1.\n\n 0x01 is Request seed level 0x01\n 0x02 is Send key level 0x01\n 0x03 is Request seed level 0x02\n 0x04 is Send key level 0x02\n (...)\n 0x41 is Request seed level 0x21\n 0x42 is Send key level 0x21\n\n The security levels numbering is arbitrary and does not imply\n any relationship between the levels.\n \"\"\"\n\n # 0x00 ISO SAE Reserved\n # 0x01-0x42 Vehicle manufacturer specific request\n # seed/send key pairs\n # 0x43-0X5E ISO SAE Reserved\n ISO_26021_2_VALUES = 0x5F\n ISO_26021_2_SEND_KEY = 0x60\n # 0x61-0x7E System supplier specific\n # 0x7F ISO SAE Reserved\n\n __REQUEST_SEED_MIN = 0x01\n __REQUEST_SEED_MAX = 0x41\n __SEND_KEY_MIN = 0x02\n __SEND_KEY_MAX = 0x42\n\n def is_valid_request_seed_level(self, sub_function):\n \"\"\"Returns True if 'sub_function' is a valid request seed\n value and False otherwise\"\"\"\n value = sub_function & 0x7F\n valid_interval = 
(self.__REQUEST_SEED_MIN\n <= value <= self.__REQUEST_SEED_MAX)\n is_odd = value % 2 == 1\n return valid_interval and is_odd\n\n def is_valid_send_key_level(self, sub_function):\n \"\"\"Returns True if 'sub_function' is a valid send key value\n and False otherwise\"\"\"\n value = sub_function & 0x7F\n valid_interval = (self.__SEND_KEY_MIN\n <= value <= self.__SEND_KEY_MAX)\n is_even = value % 2 == 0\n return valid_interval and is_even\n\n @staticmethod\n def get_send_key_for_request_seed(seed):\n return seed + 1\n\n class TesterPresent(BaseService):\n\n service_id = ServiceID.TESTER_PRESENT" }, { "identifier": "MockEcuIso14229", "path": "caringcaribou/tests/mock/mock_ecu_uds.py", "snippet": "class MockEcuIso14229(MockEcuIsoTp, MockEcu):\n \"\"\"ISO-14229-1 (Unified Diagnostic Services) mock ECU handler\"\"\"\n\n IDENTIFIER_REQUEST_POSITIVE = 0x01\n IDENTIFIER_REQUEST_POSITIVE_RESPONSE = 0x72\n IDENTIFIER_REQUEST_NEGATIVE = 0x02\n\n REQUEST_IDENTIFIER_VALID = 0xA001\n REQUEST_IDENTIFIER_INVALID = 0xA002\n REQUEST_VALUE = [0xC0, 0xFF, 0xEE]\n\n REQUEST_ADDRESS_LENGTH_AND_FORMAT = 0x22\n REQUEST_ADDRESS = 0x0001\n REQUEST_DATA_SIZE = 0x10\n DATA = list(range(0x14))\n\n # TODO Use dynamic seed value and verify keys using a simple algorithm\n SECURITY_ACCESS_SEED = [0x36, 0x57]\n SECURITY_ACCESS_KEY = [0xC9, 0xA9]\n\n def __init__(self, arb_id_request, arb_id_response, bus=None):\n MockEcu.__init__(self, bus)\n self.ARBITRATION_ID_ISO_14229_REQUEST = arb_id_request\n self.ARBITRATION_ID_ISO_14229_RESPONSE = arb_id_response\n # Set CAN filter to only listen to incoming requests on the correct arbitration ID\n arbitration_id_filter = [{\"can_id\": arb_id_request, \"can_mask\": 0x1fffffff}]\n self.bus.set_filters(arbitration_id_filter)\n # Setup ISO-TP using the filtered bus\n self.iso_tp = IsoTp(arb_id_request=self.ARBITRATION_ID_ISO_14229_REQUEST,\n arb_id_response=self.ARBITRATION_ID_ISO_14229_RESPONSE,\n bus=self.bus)\n # Setup diagnostics on top of ISO-TP\n self.diagnostics = Iso14229_1(tp=self.iso_tp)\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n MockEcuIsoTp.__exit__(self, None, None, None)\n\n @staticmethod\n def create_positive_response(request_service_id, response_data=None):\n \"\"\"\n Returns data for a positive response of 'request_service_id' with an optional 'response_data' payload\n\n :param request_service_id: Service ID (SIDRQ) of the incoming request\n :param response_data: List of data bytes to transmit in the response\n :return: List of bytes to be sent as data payload in the response\n \"\"\"\n # Positive response uses a response service ID (SIDPR) based on the request service ID (SIDRQ)\n service_response_id = Iso14229_1.get_service_response_id(request_service_id)\n response = [service_response_id]\n # Append payload\n if response_data is not None:\n response += response_data\n return response\n\n @staticmethod\n def create_negative_response(request_service_id, nrc):\n \"\"\"\n Returns data for a negative response of 'request_service_id' with negative response code 'nrc'\n\n :param request_service_id: Service ID (SIDRQ) of the incoming request\n :param nrc: Negative response code (NRC_)\n :return: List of bytes to be sent as data payload in the response\n \"\"\"\n response = [Constants.NR_SI,\n request_service_id,\n nrc]\n return response\n\n def message_handler(self, data):\n \"\"\"\n Logic for responding to incoming messages\n\n :param data: list of data bytes in incoming message\n :return: None\n \"\"\"\n assert isinstance(data, list)\n try:\n service_id = 
data[0]\n # Handle different services\n if service_id == ServiceID.DIAGNOSTIC_SESSION_CONTROL:\n # 0x10 Diagnostic session control\n response_data = self.handle_diagnostic_session_control(data)\n elif service_id == ServiceID.ECU_RESET:\n # 0x11 ECU reset\n response_data = self.handle_ecu_reset(data)\n elif service_id == ServiceID.READ_DATA_BY_IDENTIFIER:\n # 0x22 Read data by identifier\n response_data = self.handle_read_data_by_identifier(data)\n elif service_id == ServiceID.READ_MEMORY_BY_ADDRESS:\n # 0x23 Read memory by address\n response_data = self.handle_read_memory_by_address(data)\n elif service_id == ServiceID.SECURITY_ACCESS:\n # 0x27 Security access\n response_data = self.handle_security_access(data)\n elif service_id == ServiceID.WRITE_DATA_BY_IDENTIFIER:\n # 0x2E Write data by identifier\n response_data = self.handle_write_data_by_identifier(data)\n else:\n # Unsupported service\n response_data = self.handle_unsupported_service(data)\n except IndexError:\n # Parsing failed due to invalid message structure\n response_data = self.handle_service_error(data)\n\n # This check makes it possible to support services where a response should not be sent\n if response_data is not None:\n # Simulate a small delay before responding\n time.sleep(self.DELAY_BEFORE_RESPONSE)\n self.diagnostics.send_response(response_data)\n\n def handle_unsupported_service(self, data):\n \"\"\"Provides a standard response for unmapped services, by responding with NRC Service Not Supported\"\"\"\n service_id = data[0]\n nrc = NegativeResponseCodes.SERVICE_NOT_SUPPORTED\n response_data = self.create_negative_response(service_id, nrc)\n return response_data\n\n def handle_service_error(self, data):\n \"\"\"Provides a standard response for failed service requests\"\"\"\n service_id = data[0]\n nrc = NegativeResponseCodes.INCORRECT_MESSAGE_LENGTH_OR_INVALID_FORMAT\n response_data = self.create_negative_response(service_id, nrc)\n return response_data\n\n def handle_diagnostic_session_control(self, data):\n \"\"\"Evaluates a diagnostic session control request and returns a response\"\"\"\n service_id = data[0]\n # TODO Handle different values?\n session_type = data[1]\n response_data = self.create_positive_response(service_id)\n return response_data\n\n def handle_read_data_by_identifier(self, data):\n \"\"\"\n Evaluates a read data by identifier request and returns the appropriate response\n\n :param data: Data from incoming request\n :return: Response to be sent\n \"\"\"\n service_id = data[0]\n request = data[2]\n\n if request == self.IDENTIFIER_REQUEST_POSITIVE:\n # Request for positive response\n # TODO Actually read a parameter from memory\n payload = [self.IDENTIFIER_REQUEST_POSITIVE_RESPONSE]\n response_data = self.create_positive_response(service_id, payload)\n elif request == self.IDENTIFIER_REQUEST_NEGATIVE:\n # Request for negative response - use Conditions Not Correct\n nrc = NegativeResponseCodes.CONDITIONS_NOT_CORRECT\n response_data = self.create_negative_response(service_id, nrc)\n else:\n # Unmatched request - use a general reject response\n nrc = NegativeResponseCodes.GENERAL_REJECT\n response_data = self.create_negative_response(service_id, nrc)\n return response_data\n\n def handle_write_data_by_identifier(self, data):\n \"\"\"\n Evaluates a write data by identifier request and returns the appropriate response\n\n :param data: Data from incoming request\n :return: Response to be sent\n \"\"\"\n service_id = data[0]\n\n identifier_start_position = 1\n identifier_length = 2\n identifier = 
int_from_byte_list(data,\n identifier_start_position,\n identifier_length)\n request_data = data[3:]\n # TODO Actually write data to memory\n if identifier == self.REQUEST_IDENTIFIER_VALID:\n # Request for positive response\n # Standard specifies the response payload to be an echo of the data identifier from the request\n payload = data[identifier_start_position:identifier_start_position + identifier_length]\n response_data = self.create_positive_response(service_id, payload)\n elif identifier == self.REQUEST_IDENTIFIER_INVALID:\n # Request for negative response - use Conditions Not Correct\n nrc = NegativeResponseCodes.CONDITIONS_NOT_CORRECT\n response_data = self.create_negative_response(service_id, nrc)\n else:\n # Unmatched request - use a general reject response\n nrc = NegativeResponseCodes.GENERAL_REJECT\n response_data = self.create_negative_response(service_id, nrc)\n return response_data\n\n def handle_read_memory_by_address(self, data):\n \"\"\"\n Evaluates a read memory by address request and returns the appropriate response\n\n :param data: Data from incoming request\n :return: Response to be sent\n \"\"\"\n service_id = data[0]\n address_field_size = (data[1] >> 4) & 0xF\n data_length_field_size = (data[1] & 0xF)\n address_start_position = 2\n data_length_start_position = 4\n\n start_address = int_from_byte_list(data, address_start_position, address_field_size)\n data_length = int_from_byte_list(data, data_length_start_position, data_length_field_size)\n end_address = start_address + data_length\n if 0 <= start_address <= end_address <= len(self.DATA):\n memory_data = self.DATA[start_address:end_address]\n response_data = self.create_positive_response(service_id, memory_data)\n else:\n nrc = NegativeResponseCodes.REQUEST_OUT_OF_RANGE\n response_data = self.create_negative_response(service_id, nrc)\n return response_data\n\n def handle_ecu_reset(self, data):\n \"\"\"\n Evaluates an ECU reset request and returns the appropriate response\n\n :param data: Data from incoming request\n :return: Response to be sent\n \"\"\"\n service_id = data[0]\n subfunction = data[1]\n reset_type = subfunction & 0x7F\n suppress_positive_response = subfunction >> 7\n\n reset_types = Services.EcuReset.ResetType\n\n if reset_type in [reset_types.HARD_RESET, reset_types.KEY_OFF_ON_RESET, reset_types.SOFT_RESET]:\n if suppress_positive_response:\n response_data = None\n else:\n response_data = self.create_positive_response(service_id, [reset_type])\n else:\n nrc = NegativeResponseCodes.SUB_FUNCTION_NOT_SUPPORTED\n response_data = self.create_negative_response(service_id, nrc)\n return response_data\n\n def handle_security_access(self, data):\n \"\"\"\n Evaluates security access requests (both \"Request seed\" and \"Send key\") and returns the appropriate response\n\n :param data: Data from incoming request\n :return: Response to be sent\n \"\"\"\n service_id = data[0]\n subfunction = data[1]\n level = subfunction & 0x7F\n\n service_handler = Services.SecurityAccess.RequestSeedOrSendKey()\n if service_handler.is_valid_request_seed_level(level):\n # Request seed handling\n payload = [level]\n payload.extend(self.SECURITY_ACCESS_SEED)\n response_data = self.create_positive_response(service_id, payload)\n elif service_handler.is_valid_send_key_level(level):\n # Send key handling\n expected_key = self.SECURITY_ACCESS_KEY\n received_key = data[2:]\n if received_key == expected_key:\n # Correct key\n response_data = self.create_positive_response(service_id, [level])\n else:\n # Invalid key\n nrc = 
NegativeResponseCodes.INVALID_KEY\n response_data = self.create_negative_response(service_id, nrc)\n else:\n # Unsupported subfunction\n nrc = NegativeResponseCodes.SUB_FUNCTION_NOT_SUPPORTED\n response_data = self.create_negative_response(service_id, nrc)\n return response_data" }, { "identifier": "uds", "path": "caringcaribou/modules/uds.py", "snippet": "UDS_SERVICE_NAMES = {\n 0x10: \"DIAGNOSTIC_SESSION_CONTROL\",\n 0x11: \"ECU_RESET\",\n 0x14: \"CLEAR_DIAGNOSTIC_INFORMATION\",\n 0x19: \"READ_DTC_INFORMATION\",\n 0x20: \"RETURN_TO_NORMAL\",\n 0x22: \"READ_DATA_BY_IDENTIFIER\",\n 0x23: \"READ_MEMORY_BY_ADDRESS\",\n 0x24: \"READ_SCALING_DATA_BY_IDENTIFIER\",\n 0x27: \"SECURITY_ACCESS\",\n 0x28: \"COMMUNICATION_CONTROL\",\n 0x2A: \"READ_DATA_BY_PERIODIC_IDENTIFIER\",\n 0x2C: \"DYNAMICALLY_DEFINE_DATA_IDENTIFIER\",\n 0x2D: \"DEFINE_PID_BY_MEMORY_ADDRESS\",\n 0x2E: \"WRITE_DATA_BY_IDENTIFIER\",\n 0x2F: \"INPUT_OUTPUT_CONTROL_BY_IDENTIFIER\",\n 0x31: \"ROUTINE_CONTROL\",\n 0x34: \"REQUEST_DOWNLOAD\",\n 0x35: \"REQUEST_UPLOAD\",\n 0x36: \"TRANSFER_DATA\",\n 0x37: \"REQUEST_TRANSFER_EXIT\",\n 0x38: \"REQUEST_FILE_TRANSFER\",\n 0x3D: \"WRITE_MEMORY_BY_ADDRESS\",\n 0x3E: \"TESTER_PRESENT\",\n 0x7F: \"NEGATIVE_RESPONSE\",\n 0x83: \"ACCESS_TIMING_PARAMETER\",\n 0x84: \"SECURED_DATA_TRANSMISSION\",\n 0x85: \"CONTROL_DTC_SETTING\",\n 0x86: \"RESPONSE_ON_EVENT\",\n 0x87: \"LINK_CONTROL\"\n}\nNRC_NAMES = {\n 0x00: \"POSITIVE_RESPONSE\",\n 0x10: \"GENERAL_REJECT\",\n 0x11: \"SERVICE_NOT_SUPPORTED\",\n 0x12: \"SUB_FUNCTION_NOT_SUPPORTED\",\n 0x13: \"INCORRECT_MESSAGE_LENGTH_OR_INVALID_FORMAT\",\n 0x14: \"RESPONSE_TOO_LONG\",\n 0x21: \"BUSY_REPEAT_REQUEST\",\n 0x22: \"CONDITIONS_NOT_CORRECT\",\n 0x24: \"REQUEST_SEQUENCE_ERROR\",\n 0x25: \"NO_RESPONSE_FROM_SUBNET_COMPONENT\",\n 0x26: \"FAILURE_PREVENTS_EXECUTION_OF_REQUESTED_ACTION\",\n 0x31: \"REQUEST_OUT_OF_RANGE\",\n 0x33: \"SECURITY_ACCESS_DENIED\",\n 0x35: \"INVALID_KEY\",\n 0x36: \"EXCEEDED_NUMBER_OF_ATTEMPTS\",\n 0x37: \"REQUIRED_TIME_DELAY_NOT_EXPIRED\",\n 0x70: \"UPLOAD_DOWNLOAD_NOT_ACCEPTED\",\n 0x71: \"TRANSFER_DATA_SUSPENDED\",\n 0x72: \"GENERAL_PROGRAMMING_FAILURE\",\n 0x73: \"WRONG_BLOCK_SEQUENCE_COUNTER\",\n 0x78: \"REQUEST_CORRECTLY_RECEIVED_RESPONSE_PENDING\",\n 0x7E: \"SUB_FUNCTION_NOT_SUPPORTED_IN_ACTIVE_SESSION\",\n 0x7F: \"SERVICE_NOT_SUPPORTED_IN_ACTIVE_SESSION\",\n 0x81: \"RPM_TOO_HIGH\",\n 0x82: \"RPM_TOO_LOW\",\n 0x83: \"ENGINE_IS_RUNNING\",\n 0x84: \"ENGINE_IS_NOT_RUNNING\",\n 0x85: \"ENGINE_RUN_TIME_TOO_LOW\",\n 0x86: \"TEMPERATURE_TOO_HIGH\",\n 0x87: \"TEMPERATURE_TOO_LOW\",\n 0x88: \"VEHICLE_SPEED_TOO_HIGH\",\n 0x89: \"VEHICLE_SPEED_TOO_LOW\",\n 0x8A: \"THROTTLE_PEDAL_TOO_HIGH\",\n 0x8B: \"THROTTLE_PEDAL_TOO_LOW\",\n 0x8C: \"TRANSMISSION_RANGE_NOT_IN_NEUTRAL\",\n 0x8D: \"TRANSMISSION_RANGE_NOT_IN_GEAR\",\n 0x8F: \"BRAKE_SWITCHES_NOT_CLOSED\",\n 0x90: \"SHIFT_LEVER_NOT_IN_PARK\",\n 0x91: \"TORQUE_CONVERTER_CLUTCH_LOCKED\",\n 0x92: \"VOLTAGE_TOO_HIGH\",\n 0x93: \"VOLTAGE_TOO_LOW\"\n}\nDELAY_DISCOVERY = 0.01\nDELAY_TESTER_PRESENT = 0.5\nDELAY_SECSEED_RESET = 0.01\nTIMEOUT_SERVICES = 0.2\nTIMEOUT_SUBSERVICES = 0.02\nVERIFICATION_BACKTRACK = 5\nVERIFICATION_EXTRA_DELAY = 0.5\nBYTE_MIN = 0x00\nBYTE_MAX = 0xFF\nDUMP_DID_MIN = 0x0000\nDUMP_DID_MAX = 0xFFFF\nDUMP_DID_TIMEOUT = 0.2\n E=args.E\n E=args.E\ndef uds_discovery(E, min_id, max_id, blacklist_args, auto_blacklist_duration,\n delay, verify, print_results=True):\n def is_valid_response(message):\ndef __uds_discovery_wrapper(args):\ndef service_discovery(arb_id_request, 
arb_id_response, timeout,\n min_id=BYTE_MIN, max_id=BYTE_MAX, print_results=True):\ndef __service_discovery_wrapper(args):\ndef sub_discovery(arb_id_request, arb_id_response, diagnostic, service, timeout, print_results=True):\ndef __sub_discovery_wrapper(args):\ndef raw_send(arb_id_request, arb_id_response, service, session_type):\ndef tester_present(arb_id_request, delay, duration,\n suppress_positive_response):\ndef __tester_present_wrapper(args):\ndef ecu_reset(arb_id_request, arb_id_response, reset_type, timeout):\ndef __ecu_reset_wrapper(args):\ndef print_negative_response(response):\ndef __security_seed_wrapper(args):\ndef extended_session(arb_id_request, arb_id_response, session_type):\ndef request_seed(arb_id_request, arb_id_response, level,\n data_record, timeout):\ndef send_key(arb_id_request, arb_id_response, level, key, timeout):\ndef __dump_dids_wrapper(args):\ndef __auto_wrapper(args):\ndef dump_dids(arb_id_request, arb_id_response, timeout,\n min_did=DUMP_DID_MIN, max_did=DUMP_DID_MAX, print_results=True):\ndef __parse_args(args):\ndef module_main(arg_list):" } ]
from caringcaribou.utils.iso14229_1 import Constants, Iso14229_1, NegativeResponseCodes, ServiceID, Services
from caringcaribou.tests.mock.mock_ecu_uds import MockEcuIso14229
from caringcaribou.modules import uds
import unittest
10,860
ARB_ID_RESPONSE = 0x300F # Timeout (in seconds) when waiting for response during bruteforce BRUTEFORCE_TIMEOUT = 0.01 def setUp(self): # Initialize mock ECU self.ecu = MockEcuIso14229(self.ARB_ID_REQUEST, self.ARB_ID_RESPONSE) # Remove response delay self.ecu.DELAY_BEFORE_RESPONSE = 0.0 self.ecu.start_server() def tearDown(self): if isinstance(self.ecu, MockEcuIso14229): self.ecu.__exit__(None, None, None) def test_uds_discovery(self): # Discovery arguments start_arb_id = self.ARB_ID_REQUEST - 5 end_arb_id = self.ARB_ID_REQUEST + 5 blacklist = [] auto_blacklist_duration = 0 timeout = self.BRUTEFORCE_TIMEOUT verify = True print_results = False # Perform UDS discovery result = uds.uds_discovery(min_id=start_arb_id, max_id=end_arb_id, blacklist_args=blacklist, auto_blacklist_duration=auto_blacklist_duration, delay=timeout, verify=verify, print_results=print_results) expected_result = [(self.ARB_ID_REQUEST, self.ARB_ID_RESPONSE)] self.assertListEqual(result, expected_result, "UDS discovery gave '{0}', expected '{1}'".format( result, expected_result)) def test_uds_discovery_blacklist(self): # Discovery arguments start_arb_id = self.ARB_ID_REQUEST - 5 end_arb_id = self.ARB_ID_REQUEST + 5 # Blacklist the arbitration ID used for response blacklist = [self.ARB_ID_RESPONSE] auto_blacklist_duration = 0 timeout = self.BRUTEFORCE_TIMEOUT verify = True print_results = False # Perform UDS discovery result = uds.uds_discovery(min_id=start_arb_id, max_id=end_arb_id, blacklist_args=blacklist, auto_blacklist_duration=auto_blacklist_duration, delay=timeout, verify=verify, print_results=print_results) # No results expected due to blacklist expected_result = [] self.assertListEqual(result, expected_result, "UDS discovery gave '{0}', expected '{1}'".format( result, expected_result)) def test_service_discovery(self): # Service discovery arguments range_start = 0x09 range_end = 0x13 print_results = False # Perform service discovery result = uds.service_discovery(arb_id_request=self.ARB_ID_REQUEST, arb_id_response=self.ARB_ID_RESPONSE, timeout=self.BRUTEFORCE_TIMEOUT, min_id=range_start, max_id=range_end, print_results=print_results) # Supported services within specified range expected_result = [ServiceID.DIAGNOSTIC_SESSION_CONTROL, ServiceID.ECU_RESET] self.assertListEqual(result, expected_result, "UDS service discovery gave '{0}', expected '{1}'".format( result, expected_result)) def test_service_discovery_empty_range(self): # Service discovery arguments range_start = 0x00 range_end = 0x05 print_results = False # Perform service discovery result = uds.service_discovery(arb_id_request=self.ARB_ID_REQUEST, arb_id_response=self.ARB_ID_RESPONSE, timeout=self.BRUTEFORCE_TIMEOUT, min_id=range_start, max_id=range_end, print_results=print_results) # No services should be found within range expected_result = [] self.assertListEqual(result, expected_result, "UDS service discovery gave '{0}', expected no hits".format( result)) def test_ecu_reset_hard_reset_success(self): # ECU Reset arguments reset_type = Services.EcuReset.ResetType.HARD_RESET timeout = None # Perform ECU Reset result = uds.ecu_reset(arb_id_request=self.ARB_ID_REQUEST, arb_id_response=self.ARB_ID_RESPONSE, reset_type=reset_type, timeout=timeout) # Expected response format for successful request expected_response_id = Iso14229_1.get_service_response_id(Services.EcuReset.service_id) expected_result = [expected_response_id, reset_type] self.assertListEqual(result, expected_result, "ECU Reset gave '{0}', expected '{1}'".format( result, expected_result)) def 
test_ecu_reset_unsupported_reset_type_failure(self): # Invalid reset type reset_type = 0x00 timeout = None # Perform ECU Reset result = uds.ecu_reset(arb_id_request=self.ARB_ID_REQUEST, arb_id_response=self.ARB_ID_RESPONSE, reset_type=reset_type, timeout=timeout) # Expected response format for invalid request expected_response_id = Services.EcuReset.service_id
from __future__ import print_function class UdsModuleTestCase(unittest.TestCase): ARB_ID_REQUEST = 0x300E ARB_ID_RESPONSE = 0x300F # Timeout (in seconds) when waiting for response during bruteforce BRUTEFORCE_TIMEOUT = 0.01 def setUp(self): # Initialize mock ECU self.ecu = MockEcuIso14229(self.ARB_ID_REQUEST, self.ARB_ID_RESPONSE) # Remove response delay self.ecu.DELAY_BEFORE_RESPONSE = 0.0 self.ecu.start_server() def tearDown(self): if isinstance(self.ecu, MockEcuIso14229): self.ecu.__exit__(None, None, None) def test_uds_discovery(self): # Discovery arguments start_arb_id = self.ARB_ID_REQUEST - 5 end_arb_id = self.ARB_ID_REQUEST + 5 blacklist = [] auto_blacklist_duration = 0 timeout = self.BRUTEFORCE_TIMEOUT verify = True print_results = False # Perform UDS discovery result = uds.uds_discovery(min_id=start_arb_id, max_id=end_arb_id, blacklist_args=blacklist, auto_blacklist_duration=auto_blacklist_duration, delay=timeout, verify=verify, print_results=print_results) expected_result = [(self.ARB_ID_REQUEST, self.ARB_ID_RESPONSE)] self.assertListEqual(result, expected_result, "UDS discovery gave '{0}', expected '{1}'".format( result, expected_result)) def test_uds_discovery_blacklist(self): # Discovery arguments start_arb_id = self.ARB_ID_REQUEST - 5 end_arb_id = self.ARB_ID_REQUEST + 5 # Blacklist the arbitration ID used for response blacklist = [self.ARB_ID_RESPONSE] auto_blacklist_duration = 0 timeout = self.BRUTEFORCE_TIMEOUT verify = True print_results = False # Perform UDS discovery result = uds.uds_discovery(min_id=start_arb_id, max_id=end_arb_id, blacklist_args=blacklist, auto_blacklist_duration=auto_blacklist_duration, delay=timeout, verify=verify, print_results=print_results) # No results expected due to blacklist expected_result = [] self.assertListEqual(result, expected_result, "UDS discovery gave '{0}', expected '{1}'".format( result, expected_result)) def test_service_discovery(self): # Service discovery arguments range_start = 0x09 range_end = 0x13 print_results = False # Perform service discovery result = uds.service_discovery(arb_id_request=self.ARB_ID_REQUEST, arb_id_response=self.ARB_ID_RESPONSE, timeout=self.BRUTEFORCE_TIMEOUT, min_id=range_start, max_id=range_end, print_results=print_results) # Supported services within specified range expected_result = [ServiceID.DIAGNOSTIC_SESSION_CONTROL, ServiceID.ECU_RESET] self.assertListEqual(result, expected_result, "UDS service discovery gave '{0}', expected '{1}'".format( result, expected_result)) def test_service_discovery_empty_range(self): # Service discovery arguments range_start = 0x00 range_end = 0x05 print_results = False # Perform service discovery result = uds.service_discovery(arb_id_request=self.ARB_ID_REQUEST, arb_id_response=self.ARB_ID_RESPONSE, timeout=self.BRUTEFORCE_TIMEOUT, min_id=range_start, max_id=range_end, print_results=print_results) # No services should be found within range expected_result = [] self.assertListEqual(result, expected_result, "UDS service discovery gave '{0}', expected no hits".format( result)) def test_ecu_reset_hard_reset_success(self): # ECU Reset arguments reset_type = Services.EcuReset.ResetType.HARD_RESET timeout = None # Perform ECU Reset result = uds.ecu_reset(arb_id_request=self.ARB_ID_REQUEST, arb_id_response=self.ARB_ID_RESPONSE, reset_type=reset_type, timeout=timeout) # Expected response format for successful request expected_response_id = Iso14229_1.get_service_response_id(Services.EcuReset.service_id) expected_result = [expected_response_id, reset_type] 
self.assertListEqual(result, expected_result, "ECU Reset gave '{0}', expected '{1}'".format( result, expected_result)) def test_ecu_reset_unsupported_reset_type_failure(self): # Invalid reset type reset_type = 0x00 timeout = None # Perform ECU Reset result = uds.ecu_reset(arb_id_request=self.ARB_ID_REQUEST, arb_id_response=self.ARB_ID_RESPONSE, reset_type=reset_type, timeout=timeout) # Expected response format for invalid request expected_response_id = Services.EcuReset.service_id
expected_nrc = NegativeResponseCodes.SUB_FUNCTION_NOT_SUPPORTED
2
2023-11-13 05:05:46+00:00
16k
L1bra1/WeakMotion
train_WeakMotionNet.py
[ { "identifier": "WeakMotionNet", "path": "weak_model.py", "snippet": "class WeakMotionNet(nn.Module):\n def __init__(self, out_seq_len=1, FGBG_category_num=2, height_feat_size=13):\n super(WeakMotionNet, self).__init__()\n self.out_seq_len = out_seq_len\n\n self.motion_pred = MotionPrediction(seq_len=self.out_seq_len)\n self.FGBG_classify = FGBGEstimation(motion_category_num=FGBG_category_num)\n self.stpn = STPN(height_feat_size=height_feat_size)\n\n\n def forward(self, bevs):\n bevs = bevs.permute(0, 1, 4, 2, 3) # (Batch, seq, z, h, w)\n\n # Backbone network\n x = self.stpn(bevs)\n\n # FG/BG segmentation head\n FGBG_class_pred = self.FGBG_classify(x)\n\n # Motion Displacement prediction\n disp = self.motion_pred(x)\n disp = disp.view(-1, 2, x.size(-2), x.size(-1))\n\n return disp, FGBG_class_pred" }, { "identifier": "DatasetSingleSeq_Stage2", "path": "data/weak_nuscenes_dataloader.py", "snippet": "class DatasetSingleSeq_Stage2(Dataset):\n \"\"\"\n Generate the nuScenes training dataset for Stage2\n\n Parameters\n ----------\n dataset_root : Path to input data root directory\n weakdata_root: Path to weak supervision data root directory\n FBdata_root: Path to FG/BG masks predicted by PreSegNet in Stage1\n split : [train/val/test]\n annotation_ratio: Desired FG/BG annotation ratio. Should be consistent with the ratio in Stage1\n num_points_seg: Desired number of points in the current frame. Will be used to train the FG/BG segmentation head\n num_points_motion: Desired number of FG points in the three frames. Will be used for Chamfer loss\n \"\"\"\n def __init__(self, dataset_root=None, weakdata_root=None, FBdata_root=None, split='train', future_frame_skip=0, voxel_size=(0.25, 0.25, 0.4),\n area_extents=np.array([[-32., 32.], [-32., 32.], [-3., 2.]]), dims=(256, 256, 13), num_category=5,\n annotation_ratio=1.0, num_points_seg = 30000, num_points_motion = 12000):\n\n if dataset_root is None:\n raise ValueError(\"The {} dataset root is None. 
Should specify its value.\".format(split))\n\n self.dataset_root = dataset_root\n print(\"data root:\", dataset_root)\n self.weakdata_root = weakdata_root\n self.FBdata_root = FBdata_root\n\n seq_dirs = []\n if split == 'train':\n for d in os.listdir(self.dataset_root):\n tmp_0 = os.path.join(self.dataset_root, d) + '/0.npy'\n seq_dirs.append(tmp_0)\n else:\n for d in os.listdir(self.dataset_root):\n tmp_0 = os.path.join(self.dataset_root, d) + '/0.npy'\n seq_dirs.append(tmp_0)\n\n self.seq_files = seq_dirs\n self.num_sample_seqs = len(self.seq_files)\n print(\"The number of {} sequences: {}\".format(split, self.num_sample_seqs))\n\n # For training, the size of dataset should be 17065 * 1; for validation: 1719; for testing: 4309\n if split == 'train' and self.num_sample_seqs != 17065:\n warnings.warn(\">> The size of training dataset is not 17065 * 2.\\n\")\n elif split == 'val' and self.num_sample_seqs != 1719:\n warnings.warn(\">> The size of validation dataset is not 1719.\\n\")\n elif split == 'test' and self.num_sample_seqs != 4309:\n warnings.warn('>> The size of test dataset is not 4309.\\n')\n\n self.split = split\n self.voxel_size = voxel_size\n self.area_extents = area_extents\n self.future_frame_skip = future_frame_skip\n self.dims = dims\n self.annotation_ratio = annotation_ratio\n self.num_points_seg = num_points_seg\n self.num_points_motion = num_points_motion\n self.num_category = num_category\n\n def __len__(self):\n return self.num_sample_seqs\n\n def sample_foreground_point(self, pc, FGBG_label, use_GT_label=True):\n pc, not_close = remove_close(pc, radius=1.0)\n pc, filter_idx = filter_pc(pc, extents=self.area_extents)\n\n if use_GT_label:\n FGBG_label = FGBG_label[not_close]\n FGBG_label = FGBG_label[filter_idx]\n FG_mask = FGBG_label == 2\n else:\n FG_mask = FGBG_label\n\n FG_point = pc[FG_mask]\n FG_point_num = FG_point.shape[0]\n\n if FG_point_num != 0:\n if FG_point_num >= self.num_points_motion:\n sample_idx = np.random.choice(FG_point_num, self.num_points_motion, replace=False)\n FG_point_num = self.num_points_motion\n else:\n sample_idx = np.concatenate((np.arange(FG_point_num),\n np.random.choice(FG_point_num, self.num_points_motion - FG_point_num, replace=True)), axis=-1)\n FG_point = FG_point[sample_idx]\n else:\n FG_point = np.zeros((self.num_points_motion, 3))\n\n return FG_point, FG_point_num\n\n def __getitem__(self, idx):\n seq_file = self.seq_files[idx]\n gt_data_handle = np.load(seq_file, allow_pickle=True)\n gt_dict = gt_data_handle.item()\n\n dims = gt_dict['3d_dimension']\n num_future_pcs = gt_dict['num_future_pcs']\n num_past_pcs = gt_dict['num_past_pcs']\n pixel_indices = gt_dict['pixel_indices']\n\n sparse_disp_field_gt = gt_dict['disp_field']\n all_disp_field_gt = np.zeros((num_future_pcs, dims[0], dims[1], 2), dtype=np.float32)\n all_disp_field_gt[:, pixel_indices[:, 0], pixel_indices[:, 1], :] = sparse_disp_field_gt[:]\n\n sparse_valid_pixel_maps = gt_dict['valid_pixel_map']\n all_valid_pixel_maps = np.zeros((num_future_pcs, dims[0], dims[1]), dtype=np.float32)\n all_valid_pixel_maps[:, pixel_indices[:, 0], pixel_indices[:, 1]] = sparse_valid_pixel_maps[:]\n\n sparse_pixel_cat_maps = gt_dict['pixel_cat_map']\n pixel_cat_map = np.zeros((dims[0], dims[1], self.num_category), dtype=np.float32)\n pixel_cat_map[pixel_indices[:, 0], pixel_indices[:, 1], :] = sparse_pixel_cat_maps[:]\n\n non_empty_map = np.zeros((dims[0], dims[1]), dtype=np.float32)\n non_empty_map[pixel_indices[:, 0], pixel_indices[:, 1]] = 1.0\n\n padded_voxel_points = list()\n for 
i in range(num_past_pcs):\n indices = gt_dict['voxel_indices_' + str(i)]\n curr_voxels = np.zeros(dims, dtype=np.bool)\n curr_voxels[indices[:, 0], indices[:, 1], indices[:, 2]] = 1\n padded_voxel_points.append(curr_voxels)\n padded_voxel_points = np.stack(padded_voxel_points, 0).astype(np.float32)\n\n # get weak supervision\n if self.split == 'train':\n scene_name = seq_file.split('/')[-2]\n weak_file_name = os.path.join(os.path.join(self.weakdata_root, scene_name), '0.npy')\n weak_data_handle = np.load(weak_file_name, allow_pickle=True)\n weak_dict = weak_data_handle.item()\n\n # get FG/BG annotations for the current frame,\n # this procedure is the same as the data preparation in Stage1\n # 0: past frame; 1: current frame; 2: future frame\n\n pc_seg = weak_dict['synchronized_pc_1'].T[:, 0:3]\n label_seg = weak_dict['points_label_1']\n FGBG_gt_mask_seg = convert_semantic_to_FGBG(label_seg[:, 0])\n sample_idx = weak_dict['sample_idx_1']\n\n selected_num = np.floor(self.annotation_ratio * len(sample_idx)).astype(np.int64)\n selected_sample_idx = sample_idx[:selected_num]\n\n annotation_mask = np.zeros(len(sample_idx), dtype=np.float32)\n annotation_mask[selected_sample_idx] = 1 # 0: point without annotation; 1: point with annotation\n FGBG_gt_mask_seg[annotation_mask == 0] = 3 # 1: Background; 2: Foreground; 3: Unlabelled\n\n pc_seg, not_close = remove_close(pc_seg, radius=1.0)\n FGBG_gt_mask_seg = FGBG_gt_mask_seg[not_close]\n pc_seg, filter_idx = filter_pc(pc_seg, extents=self.area_extents)\n FGBG_gt_mask_seg = FGBG_gt_mask_seg[filter_idx]\n\n curr_seg_num = pc_seg.shape[0]\n if curr_seg_num >= self.num_points_seg:\n pc_sample_idx = np.random.choice(curr_seg_num, self.num_points_seg, replace=False)\n curr_seg_num = self.num_points_seg\n else:\n pc_sample_idx = np.concatenate((np.arange(curr_seg_num),\n np.random.choice(curr_seg_num, self.num_points_seg - curr_seg_num, replace=True)), axis=-1)\n point_FGBG_gt_mask_seg = FGBG_gt_mask_seg[pc_sample_idx]\n pc_seg = pc_seg[pc_sample_idx]\n\n # get foreground points in three frames for chamfer loss\n if self.annotation_ratio ==1:\n # When using full annotations, we directly extract ground truth foreground points for chamfer loss\n pc_0 = weak_dict['synchronized_pc_0'].T[:, 0:3]\n label_0 = weak_dict['points_label_0']\n FGBG_gt_mask_0 = convert_semantic_to_FGBG(label_0[:, 0]) # 1: Background; 2: Foreground\n FG_point_0, FG_point_num_0 = self.sample_foreground_point(pc_0, FGBG_gt_mask_0)\n\n pc_1 = weak_dict['synchronized_pc_1'].T[:, 0:3]\n label_1 = weak_dict['points_label_1']\n FGBG_gt_mask_1 = convert_semantic_to_FGBG(label_1[:, 0])\n FG_point_1, FG_point_num_1 = self.sample_foreground_point(pc_1, FGBG_gt_mask_1)\n\n pc_2 = weak_dict['synchronized_pc_2'].T[:, 0:3]\n label_2 = weak_dict['points_label_2']\n FGBG_gt_mask_2 = convert_semantic_to_FGBG(label_2[:, 0])\n FG_point_2, FG_point_num_2 = self.sample_foreground_point(pc_2, FGBG_gt_mask_2)\n else:\n # When using partial annotations, we extract foreground points predicted by PreSegNet for Chamfer loss\n pred_FGBG_file_name = os.path.join(self.FBdata_root, scene_name + '.npz')\n pred_FGBG_data = np.load(pred_FGBG_file_name)\n\n pc_0 = weak_dict['synchronized_pc_0'].T[:, 0:3]\n pred_FGBG_0 = pred_FGBG_data['pred_0']\n FG_point_0, FG_point_num_0 = self.sample_foreground_point(pc_0, pred_FGBG_0, use_GT_label=False)\n\n pc_1 = weak_dict['synchronized_pc_1'].T[:, 0:3]\n pred_FGBG_1 = pred_FGBG_data['pred_1']\n FG_point_1, FG_point_num_1 = self.sample_foreground_point(pc_1, pred_FGBG_1, 
use_GT_label=False)\n\n pc_2 = weak_dict['synchronized_pc_2'].T[:, 0:3]\n pred_FGBG_2 = pred_FGBG_data['pred_2']\n FG_point_2, FG_point_num_2 = self.sample_foreground_point(pc_2, pred_FGBG_2, use_GT_label=False)\n\n else:\n pc_seg = np.zeros(1)\n point_FGBG_gt_mask_seg = np.zeros(1)\n curr_seg_num = np.zeros(1)\n FG_point_0 = np.zeros(1)\n FG_point_num_0 = np.zeros(1)\n FG_point_1 = np.zeros(1)\n FG_point_num_1 = np.zeros(1)\n FG_point_2 = np.zeros(1)\n FG_point_num_2 = np.zeros(1)\n\n return padded_voxel_points, all_disp_field_gt, pixel_cat_map, \\\n non_empty_map, all_valid_pixel_maps, num_future_pcs, \\\n pc_seg, point_FGBG_gt_mask_seg, curr_seg_num, \\\n FG_point_0, FG_point_num_0, FG_point_1, FG_point_num_1, FG_point_2, FG_point_num_2" }, { "identifier": "DatasetSingleSeq_Stage2", "path": "data/weak_waymo_dataloader.py", "snippet": "class DatasetSingleSeq_Stage2(Dataset):\n \"\"\"\n Generate the Waymo training dataset for Stage2\n\n Parameters\n ----------\n dataset_root : Path to input data root directory\n weakdata_root: Path to weak supervision data root directory\n FBdata_root: Path to FG/BG masks predicted by PreSegNet in Stage1\n split : [train/val]\n annotation_ratio: Desired FG/BG annotation ratio. Should be consistent with the ratio in Stage1\n num_points_seg: Desired number of points in the current frame. Will be used to train the FG/BG segmentation head\n num_points_motion: Desired number of FG points in the three frames. Will be used for Chamfer loss\n \"\"\"\n def __init__(self, dataset_root=None, weakdata_root=None, FBdata_root=None, split='train', future_frame_skip=0, voxel_size=(0.25, 0.25, 0.4),\n area_extents=np.array([[-32., 32.], [-32., 32.], [-1., 4.]]), dims=(256, 256, 13), num_category=5,\n annotation_ratio=1.0, num_points_seg = 40000, num_points_motion = 12000):\n\n if dataset_root is None:\n raise ValueError(\"The {} dataset root is None. 
Should specify its value.\".format(split))\n\n self.dataset_root = dataset_root\n print(\"data root:\", dataset_root)\n self.weakdata_root = weakdata_root\n self.FBdata_root = FBdata_root\n\n seq_dirs = []\n if split == 'train':\n for d in os.listdir(self.dataset_root):\n tmp_0 = os.path.join(self.dataset_root, d)\n seq_dirs.append(tmp_0)\n else:\n for d in os.listdir(self.dataset_root):\n tmp_0 = os.path.join(self.dataset_root, d)\n seq_dirs.append(tmp_0)\n\n self.seq_files = seq_dirs\n self.num_sample_seqs = len(self.seq_files)\n print(\"The number of {} sequences: {}\".format(split, self.num_sample_seqs))\n\n # For training, the size of dataset should be 14351; for validation/testing: 3634\n if split == 'train' and self.num_sample_seqs != 14351:\n warnings.warn(\">> The size of training dataset is not 14351.\\n\")\n elif split == 'val' and self.num_sample_seqs != 3634:\n warnings.warn(\">> The size of validation dataset is not 3634.\\n\")\n\n\n self.split = split\n self.voxel_size = voxel_size\n self.area_extents = area_extents\n self.future_frame_skip = future_frame_skip\n self.dims = dims\n self.annotation_ratio = annotation_ratio\n self.num_points_seg = num_points_seg\n self.num_points_motion = num_points_motion\n self.num_category = num_category\n\n def __len__(self):\n return self.num_sample_seqs\n\n def sample_foreground_point(self, pc, FGBG_label, use_GT_label=True):\n pc, not_close = remove_close(pc, radius=1.0)\n pc, filter_idx = filter_pc(pc, extents=self.area_extents)\n\n if use_GT_label:\n FGBG_label = FGBG_label[not_close]\n FGBG_label = FGBG_label[filter_idx]\n FG_mask = FGBG_label == 2\n else:\n FG_mask = FGBG_label\n\n FG_point = pc[FG_mask]\n FG_point_num = FG_point.shape[0]\n\n if FG_point_num != 0:\n if FG_point_num >= self.num_points_motion:\n sample_idx = np.random.choice(FG_point_num, self.num_points_motion, replace=False)\n FG_point_num = self.num_points_motion\n else:\n sample_idx = np.concatenate((np.arange(FG_point_num),\n np.random.choice(FG_point_num, self.num_points_motion - FG_point_num, replace=True)), axis=-1)\n FG_point = FG_point[sample_idx]\n else:\n FG_point = np.zeros((self.num_points_motion, 3))\n\n return FG_point, FG_point_num\n\n def __getitem__(self, idx):\n seq_file = self.seq_files[idx]\n gt_data_handle = np.load(seq_file, allow_pickle=True)\n gt_dict = gt_data_handle.item()\n\n dims = gt_dict['3d_dimension']\n num_future_pcs = gt_dict['num_future_pcs']\n num_past_pcs = gt_dict['num_past_pcs']\n pixel_indices = gt_dict['pixel_indices']\n\n sparse_disp_field_gt = gt_dict['disp_field']\n all_disp_field_gt = np.zeros((num_future_pcs, dims[0], dims[1], 2), dtype=np.float32)\n all_disp_field_gt[:, pixel_indices[:, 0], pixel_indices[:, 1], :] = sparse_disp_field_gt[:]\n\n sparse_valid_pixel_maps = gt_dict['valid_pixel_map']\n all_valid_pixel_maps = np.zeros((num_future_pcs, dims[0], dims[1]), dtype=np.float32)\n all_valid_pixel_maps[:, pixel_indices[:, 0], pixel_indices[:, 1]] = sparse_valid_pixel_maps[:]\n\n sparse_pixel_cat_maps = gt_dict['pixel_cat_map']\n pixel_cat_map = np.zeros((dims[0], dims[1], self.num_category), dtype=np.float32)\n pixel_cat_map[pixel_indices[:, 0], pixel_indices[:, 1], :] = sparse_pixel_cat_maps[:]\n\n non_empty_map = np.zeros((dims[0], dims[1]), dtype=np.float32)\n non_empty_map[pixel_indices[:, 0], pixel_indices[:, 1]] = 1.0\n\n padded_voxel_points = list()\n for i in range(num_past_pcs):\n indices = gt_dict['voxel_indices_' + str(i)]\n curr_voxels = np.zeros(dims, dtype=np.bool)\n curr_voxels[indices[:, 0], indices[:, 
1], indices[:, 2]] = 1\n padded_voxel_points.append(curr_voxels)\n padded_voxel_points = np.stack(padded_voxel_points, 0).astype(np.float32)\n\n # get weak supervision\n if self.split == 'train':\n scene_name = seq_file.split('/')[-1]\n weak_file_name = os.path.join(self.weakdata_root, scene_name)\n weak_data_handle = np.load(weak_file_name, allow_pickle=True)\n weak_dict = weak_data_handle.item()\n\n # get FG/BG annotations for the current frame,\n # this procedure is the same as the data preparation in Stage1\n # 0: past frame; 1: current frame; 2: future frame\n\n pc_seg = weak_dict['synchronized_pc_1'].T[:, 0:3]\n label_seg = weak_dict['points_label_1']\n FGBG_gt_mask_seg = convert_semantic_to_FGBG_waymo(label_seg[:, 0])\n sample_idx = weak_dict['sample_idx_1']\n\n selected_num = np.floor(self.annotation_ratio * len(sample_idx)).astype(np.int64)\n selected_sample_idx = sample_idx[:selected_num]\n\n annotation_mask = np.zeros(len(sample_idx), dtype=np.float32)\n annotation_mask[selected_sample_idx] = 1 # 0: point without annotation; 1: point with annotation\n FGBG_gt_mask_seg[annotation_mask == 0] = 3 # 1: Background; 2: Foreground; 3: Unlabelled\n\n pc_seg, not_close = remove_close(pc_seg, radius=1.0)\n FGBG_gt_mask_seg = FGBG_gt_mask_seg[not_close]\n pc_seg, filter_idx = filter_pc(pc_seg, extents=self.area_extents)\n FGBG_gt_mask_seg = FGBG_gt_mask_seg[filter_idx]\n\n curr_seg_num = pc_seg.shape[0]\n if curr_seg_num >= self.num_points_seg:\n pc_sample_idx = np.random.choice(curr_seg_num, self.num_points_seg, replace=False)\n curr_seg_num = self.num_points_seg\n else:\n pc_sample_idx = np.concatenate((np.arange(curr_seg_num),\n np.random.choice(curr_seg_num, self.num_points_seg - curr_seg_num, replace=True)), axis=-1)\n point_FGBG_gt_mask_seg = FGBG_gt_mask_seg[pc_sample_idx]\n pc_seg = pc_seg[pc_sample_idx]\n\n # get foreground points in three frames for chamfer loss\n if self.annotation_ratio ==1:\n # When using full annotations, we directly extract ground truth foreground points for chamfer loss\n pc_0 = weak_dict['synchronized_pc_0'].T[:, 0:3]\n label_0 = weak_dict['points_label_0']\n FGBG_gt_mask_0 = convert_semantic_to_FGBG_waymo(label_0[:, 0]) # 1: Background; 2: Foreground\n FG_point_0, FG_point_num_0 = self.sample_foreground_point(pc_0, FGBG_gt_mask_0)\n\n pc_1 = weak_dict['synchronized_pc_1'].T[:, 0:3]\n label_1 = weak_dict['points_label_1']\n FGBG_gt_mask_1 = convert_semantic_to_FGBG_waymo(label_1[:, 0])\n FG_point_1, FG_point_num_1 = self.sample_foreground_point(pc_1, FGBG_gt_mask_1)\n\n pc_2 = weak_dict['synchronized_pc_2'].T[:, 0:3]\n label_2 = weak_dict['points_label_2']\n FGBG_gt_mask_2 = convert_semantic_to_FGBG_waymo(label_2[:, 0])\n FG_point_2, FG_point_num_2 = self.sample_foreground_point(pc_2, FGBG_gt_mask_2)\n else:\n # When using partial annotations, we extract foreground points predicted by PreSegNet for Chamfer loss\n pred_FGBG_file_name = os.path.join(self.FBdata_root, scene_name.split('.')[0] + '.npz')\n pred_FGBG_data = np.load(pred_FGBG_file_name)\n\n pc_0 = weak_dict['synchronized_pc_0'].T[:, 0:3]\n pred_FGBG_0 = pred_FGBG_data['pred_0']\n FG_point_0, FG_point_num_0 = self.sample_foreground_point(pc_0, pred_FGBG_0, use_GT_label=False)\n\n pc_1 = weak_dict['synchronized_pc_1'].T[:, 0:3]\n pred_FGBG_1 = pred_FGBG_data['pred_1']\n FG_point_1, FG_point_num_1 = self.sample_foreground_point(pc_1, pred_FGBG_1, use_GT_label=False)\n\n pc_2 = weak_dict['synchronized_pc_2'].T[:, 0:3]\n pred_FGBG_2 = pred_FGBG_data['pred_2']\n FG_point_2, FG_point_num_2 = 
self.sample_foreground_point(pc_2, pred_FGBG_2, use_GT_label=False)\n\n else:\n pc_seg = np.zeros(1)\n point_FGBG_gt_mask_seg = np.zeros(1)\n curr_seg_num = np.zeros(1)\n FG_point_0 = np.zeros(1)\n FG_point_num_0 = np.zeros(1)\n FG_point_1 = np.zeros(1)\n FG_point_num_1 = np.zeros(1)\n FG_point_2 = np.zeros(1)\n FG_point_num_2 = np.zeros(1)\n\n return padded_voxel_points, all_disp_field_gt, pixel_cat_map, \\\n non_empty_map, all_valid_pixel_maps, num_future_pcs, \\\n pc_seg, point_FGBG_gt_mask_seg, curr_seg_num, \\\n FG_point_0, FG_point_num_0, FG_point_1, FG_point_num_1, FG_point_2, FG_point_num_2" }, { "identifier": "FGBG_seg_loss", "path": "loss_utils.py", "snippet": "def FGBG_seg_loss(FGBG_pred, point_FGBG_gt_mask, source_pc, source_num, voxel_size, area_extents):\n \"\"\"\n Foreground Background segmentation loss\n ----------\n\n Inputs:\n FGBG_pred: [B, 2, dim_0, dim_1], predicted Foreground/Background BEV map\n point_FGBG_gt_mask: [B, N], per-point Foreground/Background ground truth, (1: BG, 2: FG, 3: Unannotated)\n source_pc: [B, N, 3], point cloud in current frame\n source_num: [B], unrepeated point number in each sample\n voxel_size, area_extents: voxel size and range of area,\n \"\"\"\n\n batch_size = FGBG_pred.shape[0]\n device = FGBG_pred.device\n\n loss_FGBG_seg = torch.zeros((1), device=device, dtype=FGBG_pred.dtype)\n\n for batch_index in range(batch_size):\n\n # get current batch\n curr_source_num = source_num[batch_index]\n curr_source_pc_np = source_pc[batch_index, :curr_source_num, :].numpy()\n curr_point_FGBG_gt_mask = point_FGBG_gt_mask[batch_index, :curr_source_num].float().to(device) # 1: Background; 2: Foreground; 3: Unannotated\n curr_FGBG_pred = FGBG_pred[batch_index]\n\n # generate FGBG ground truth and weight for each point\n curr_point_BG_gt_mask = (curr_point_FGBG_gt_mask == 1).float().unsqueeze(0)\n curr_point_FG_gt_mask = (curr_point_FGBG_gt_mask == 2).float().unsqueeze(0)\n\n curr_point_FGBG_gt_map = torch.cat([curr_point_BG_gt_mask, curr_point_FG_gt_mask], 0).permute(1, 0)\n\n # weight assigned to different categories. 
0.005 for BG; 1.0 for FG; 0.0 for unlabelled\n curr_FGBG_weight_map = (curr_point_BG_gt_mask * 0.005 + curr_point_FG_gt_mask * 1.0).squeeze(0)\n curr_annotated_point_num = torch.sum((curr_point_FGBG_gt_mask != 3).float())\n\n # get FGBG prediction for each point\n curr_voxel_indices = gen_voxel_indices_for_pc(curr_source_pc_np, voxel_size, area_extents)\n curr_point_FGBG_pred = curr_FGBG_pred[:, curr_voxel_indices[:, 0], curr_voxel_indices[:, 1]].permute(1, 0)\n\n # compute current loss\n curr_log_softmax_FGBG_pred = F.log_softmax(curr_point_FGBG_pred, dim=1)\n curr_loss_FGBG_pred = torch.sum(- curr_point_FGBG_gt_map * curr_log_softmax_FGBG_pred, dim=1) * curr_FGBG_weight_map\n curr_loss_FGBG_predd = torch.sum(curr_loss_FGBG_pred) / (curr_annotated_point_num + 1e-6)\n\n # accumulate loss\n loss_FGBG_seg = loss_FGBG_seg + curr_loss_FGBG_predd\n\n loss_FGBG_seg = loss_FGBG_seg / batch_size\n return loss_FGBG_seg" }, { "identifier": "CCD_loss", "path": "loss_utils.py", "snippet": "def CCD_loss(disp_pred, pc_0, pc_0_num, pc_1, pc_1_num, pc_2, pc_2_num, non_empty_map, voxel_size, area_extents,\n epoch, epoch_threshold=10, theta2=1):\n \"\"\"\n Consistency-aware Chamfer Distance loss\n ----------\n\n Inputs:\n disp_pred: [B, 2, dim_0, dim_1], predicted 2D displacement BEV map\n\n pc_0: [B, M, 3], predicted foreground points in the past frame (-0.5s)\n pc_0_num: [B], unrepeated foreground point number in each past frame\n\n pc_1: [B, M, 3], predicted foreground points in the current frame (0s)\n pc_1_num: [B], unrepeated foreground point number in each current frame\n\n pc_2: [B, M, 3], predicted foreground points in the future frame (+0.5s)\n pc_2_num: [B], unrepeated foreground point number in each future frame\n\n non_empty_map: [B, dim_0, dim_1] nonempty mask\n voxel_size, area_extents: voxel size and range of area,\n\n epoch: the number of current training epoch\n epoch_threshold: After epoch_threshold epochs, we start to reweight multi-frame Chamfer loss\n theta2: hyper-parameter in Gaussian kernel, used in Eq.(6)\n \"\"\"\n\n batch_size = disp_pred.shape[0]\n device = disp_pred.device\n loss_disp = torch.zeros((1), device=device, dtype=disp_pred.dtype)\n\n valid_sample_num = 0\n for batch_index in range(batch_size):\n\n # 0: past frame; 1: current frame; 2: future frame\n curr_pc_0_num = pc_0_num[batch_index]\n curr_pc_1_num = pc_1_num[batch_index]\n curr_pc_2_num = pc_2_num[batch_index]\n if (curr_pc_0_num > 0) and (curr_pc_1_num > 0) and (curr_pc_2_num > 0):\n valid_sample_num = valid_sample_num + 1\n curr_valid_map = non_empty_map[batch_index]\n\n # get source and target point clouds, predicted 2D BEV flow\n curr_pc_0_np = pc_0[batch_index, :curr_pc_0_num, :].numpy() # target pc, past frame\n curr_pc_1_np = pc_1[batch_index, :curr_pc_1_num, :].numpy() # current pc, source frame\n curr_pc_2_np = pc_2[batch_index, :curr_pc_2_num, :].numpy() # target pc, future frame\n curr_disp_pred = disp_pred[batch_index, :, :, :]\n\n # get predicted 3D flow for each point\n curr_voxel_indices = gen_voxel_indices_for_pc(curr_pc_1_np, voxel_size, area_extents)\n curr_point_disp_pred = curr_disp_pred[:, curr_voxel_indices[:, 0], curr_voxel_indices[:, 1]].permute(1, 0)\n\n # get FG and BG map for the current frame, the map is estimated by the PreSegNet in Stage1\n curr_fg_map = torch.zeros_like(curr_valid_map)\n curr_fg_map[curr_voxel_indices[:, 0], curr_voxel_indices[:, 1]] = 1\n curr_fg_map = curr_fg_map * curr_valid_map\n fg_voxel_num = torch.sum(curr_fg_map)\n\n curr_bg_map = (1 - curr_fg_map) * 
curr_valid_map\n bg_voxel_num = torch.sum(curr_bg_map)\n\n curr_pc_0 = torch.from_numpy(curr_pc_0_np).to(device).float()\n curr_pc_1 = torch.from_numpy(curr_pc_1_np).to(device).float()\n curr_pc_2 = torch.from_numpy(curr_pc_2_np).to(device).float()\n curr_point_3d_disp_pred = torch.cat([curr_point_disp_pred, torch.zeros_like(curr_point_disp_pred[:, 0:1])], -1)\n\n # compute confidence weights for the three point clouds\n if epoch > epoch_threshold:\n # After epoch_threshold epochs, we start to reweight multi-frame Chamfer loss\n weight_P, weight_C, weight_F = gen_confidence_weight(curr_pc_0, curr_pc_1, curr_pc_2, curr_point_3d_disp_pred, theta2=theta2)\n else:\n weight_P, weight_C, weight_F = None, None, None\n\n # Consistency-aware Chamfer Distance loss function for the foreground points\n # backward term (backward warped current frame, past frame)\n warped_source_pc_backward = curr_pc_1 - curr_point_3d_disp_pred\n fg_loss_backward = weighted_chamfer_loss(warped_source_pc_backward, curr_pc_0, weight_C, weight_P)\n\n # forward term (forward warped current frame, future frame)\n warped_source_pc_forward = curr_pc_1 + curr_point_3d_disp_pred\n fg_loss_forward = weighted_chamfer_loss(warped_source_pc_forward, curr_pc_2, weight_C, weight_F)\n\n fg_loss = (fg_loss_backward + fg_loss_forward) / 2.0\n\n # generate loss for the background points. Eq.(13)\n bg_gt = torch.zeros_like(curr_disp_pred) # background points are regarded as static\n bg_loss = torch.sum(torch.abs(curr_disp_pred * curr_bg_map.unsqueeze(0) - bg_gt * curr_bg_map.unsqueeze(0)), 0)\n bg_loss = torch.sum(bg_loss) / (torch.sum(curr_bg_map) + 1e-6)\n\n # combine the losses from the foreground and the background. Eq.(12)\n curr_loss = (fg_loss * fg_voxel_num + 0.005 * bg_loss * bg_voxel_num) \\\n / (fg_voxel_num + bg_voxel_num + 1e-6)\n\n loss_disp = loss_disp + curr_loss\n\n loss_disp = loss_disp / valid_sample_num\n return loss_disp" }, { "identifier": "evaluate_FGBG_prediction", "path": "evaluation_utils.py", "snippet": "def evaluate_FGBG_prediction(FGBG_pred, non_empty_map_numpy, pixel_cat_map_gt_numpy, overall_cls_gt, overall_cls_pred,\n datatype='nuScenes'):\n\n # Convert the category map\n max_prob = np.amax(pixel_cat_map_gt_numpy, axis=-1)\n filter_mask = max_prob == 1.0 # Note: some of the cell probabilities are soft probabilities\n pixel_cat_map_numpy = np.argmax(pixel_cat_map_gt_numpy,\n axis=-1) + 1 # category starts from 1 (background), etc\n\n # Convert category label to FG/BG label\n pixel_FGBG_map_numpy = pixel_cat_map_numpy.copy()\n if datatype == 'nuScenes':\n # 1: background or empty; 2: Vehicle; 3: Ped; 4: Bike; 5: Others\n pixel_FGBG_map_numpy[pixel_FGBG_map_numpy > 1] = 2\n elif datatype == 'Waymo':\n # 1: background or empty; 2: Vehicle; 3: Ped; 4: Cyclist; 5: Sign, regarded as background\n tmp = pixel_FGBG_map_numpy.copy()\n pixel_FGBG_map_numpy[tmp > 1] = 2\n pixel_FGBG_map_numpy[(tmp == 5)] = 1\n\n pixel_FGBG_map_numpy = (pixel_FGBG_map_numpy * non_empty_map_numpy * filter_mask).astype(\n np.int32) # 0: Empty; 1: Background; 2: Foreground\n\n FGBG_pred_numpy = FGBG_pred.cpu().numpy()\n FGBG_pred_numpy = np.transpose(FGBG_pred_numpy, (0, 2, 3, 1))\n FGBG_pred_numpy = np.argmax(FGBG_pred_numpy, axis=-1) + 1\n FGBG_pred_numpy = (FGBG_pred_numpy * non_empty_map_numpy * filter_mask).astype(np.int32)\n\n border = 8\n roi_mask = np.zeros_like(non_empty_map_numpy)\n roi_mask[:, border:-border, border:-border] = 1.0\n\n # For computing confusion matrix, in order to compute FG/BG classification accuracy for each 
category\n count_mask = non_empty_map_numpy * filter_mask * roi_mask\n idx_fg = np.where(count_mask > 0)\n\n overall_cls_gt.append(pixel_FGBG_map_numpy[idx_fg])\n overall_cls_pred.append(FGBG_pred_numpy[idx_fg])\n\n return overall_cls_gt, overall_cls_pred" }, { "identifier": "evaluate_motion_prediction", "path": "evaluation_utils.py", "snippet": "def evaluate_motion_prediction(disp_pred, FGBG_pred, all_disp_field_gt, all_valid_pixel_maps, future_steps,\n distance_intervals, selected_future_sweeps, cell_groups,\n use_FGBG_pred_masking=True, datatype='nuScenes'):\n\n pred_shape = disp_pred.size()\n disp_pred = disp_pred.view(all_disp_field_gt.size(0), -1, pred_shape[-3], pred_shape[-2], pred_shape[-1])\n disp_pred = disp_pred.contiguous()\n disp_pred = disp_pred.cpu().numpy()\n\n if use_FGBG_pred_masking:\n FGBG_pred_numpy = FGBG_pred.cpu().numpy()\n FGBG_pred_numpy = np.argmax(FGBG_pred_numpy, axis=1)\n mask = FGBG_pred_numpy == 0 # predicted Background mask\n\n # For those with very small movements, we consider them as static\n last_pred = disp_pred[:, -1, :, :, :]\n last_pred_norm = np.linalg.norm(last_pred, ord=2, axis=1) # out: (batch, h, w)\n thd_mask = last_pred_norm <= 0.2\n\n cat_weight_map = np.ones_like(FGBG_pred_numpy, dtype=np.float32)\n cat_weight_map[mask] = 0.0\n cat_weight_map[thd_mask] = 0.0\n cat_weight_map = cat_weight_map[:, np.newaxis, np.newaxis, ...] # (batch, 1, 1, h, w)\n\n disp_pred = disp_pred * cat_weight_map # small motion, static, background\n\n\n # Pre-processing\n all_disp_field_gt = all_disp_field_gt.numpy() # (bs, seq, h, w, channel)\n future_steps = future_steps.numpy()[0]\n\n valid_pixel_maps = all_valid_pixel_maps[:, -future_steps:, ...].contiguous()\n valid_pixel_maps = valid_pixel_maps.numpy()\n\n all_disp_field_gt = all_disp_field_gt[:, -future_steps:, ]\n all_disp_field_gt = np.transpose(all_disp_field_gt, (0, 1, 4, 2, 3))\n all_disp_field_gt_norm = np.linalg.norm(all_disp_field_gt, ord=2, axis=2)\n\n upper_thresh = 0.2\n if datatype == 'nuScenes':\n upper_bound = 1 / 20 * upper_thresh\n elif datatype == 'Waymo':\n upper_bound = 1 / 10 * upper_thresh\n\n static_cell_mask = all_disp_field_gt_norm <= upper_bound\n static_cell_mask = np.all(static_cell_mask, axis=1) # along the temporal axis\n moving_cell_mask = np.logical_not(static_cell_mask)\n\n for j, d in enumerate(distance_intervals):\n for slot, s in enumerate((selected_future_sweeps - 1)): # selected_future_sweeps: [4, 8, ...]\n curr_valid_pixel_map = valid_pixel_maps[:, s]\n\n if j == 0: # corresponds to static cells\n curr_mask = np.logical_and(curr_valid_pixel_map, static_cell_mask)\n else:\n # We use the displacement between keyframe and the last sample frame as metrics\n last_gt_norm = all_disp_field_gt_norm[:, -1]\n mask = np.logical_and(d[0] <= last_gt_norm, last_gt_norm < d[1])\n\n curr_mask = np.logical_and(curr_valid_pixel_map, mask)\n curr_mask = np.logical_and(curr_mask, moving_cell_mask)\n\n # we evaluate the performance for cells within the range [-30m, 30m] along both x, y dimensions.\n border = 8\n roi_mask = np.zeros_like(curr_mask, dtype=np.bool_)\n roi_mask[:, border:-border, border:-border] = True\n curr_mask = np.logical_and(curr_mask, roi_mask)\n\n cell_idx = np.where(curr_mask == True)\n\n gt = all_disp_field_gt[:, s]\n pred = disp_pred[:, -1, :, :, :]\n norm_error = np.linalg.norm(gt - pred, ord=2, axis=1)\n\n cell_groups[j][slot].append(norm_error[cell_idx])\n\n return cell_groups" } ]
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import numpy as np
import time
import sys
import argparse
import os
from shutil import copytree, copy
from weak_model import WeakMotionNet
from data.weak_nuscenes_dataloader import DatasetSingleSeq_Stage2
from data.weak_waymo_dataloader import DatasetSingleSeq_Stage2 as DatasetSingleSeq_Stage2_waymo
from sklearn.metrics import confusion_matrix
from tqdm import tqdm
from loss_utils import FGBG_seg_loss, CCD_loss
from evaluation_utils import evaluate_FGBG_prediction, evaluate_motion_prediction
11,245
Reference: MotionNet (https://www.merl.com/research/?research=license-request&sw=MotionNet)
"""


class AverageMeter(object):
    """Computes and stores the average and current value"""

    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return fmtstr.format(**self.__dict__)


def check_folder(folder_path):
    if not os.path.exists(folder_path):
        os.mkdir(folder_path)
    return folder_path


out_seq_len = 1  # The number of future frames we are going to predict
height_feat_size = 13  # The size along the height dimension

parser = argparse.ArgumentParser()
parser.add_argument('-md', '--motiondata', default='/path_to/nuScenes/input-data/train/', type=str, help='The path to the preprocessed sparse BEV training data')
parser.add_argument('-wd', '--weakdata', default='/path_to/nuScenes/weak-data/train/', type=str, help='The path to the preprocessed sparse BEV training data')
parser.add_argument('-FBd', '--FBdata', default='/path_to/nuScenes/FGBG-data/nuscenes_seg_0-01/', type=str, help='The path to the preprocessed sparse BEV training data')
parser.add_argument('--datatype', default='nuScenes', type=str, choices=['Waymo', 'nuScenes'])
parser.add_argument('-t', '--evaldata', default='/path_to/nuScenes/input-data/val/', type=str, help='The path to the preprocessed sparse BEV training data')
parser.add_argument('--resume', default='', type=str, help='The path to the saved model that is loaded to resume training')
parser.add_argument('--batch', default=8, type=int, help='Batch size')
parser.add_argument('--nepoch', default=60, type=int, help='Number of epochs')
parser.add_argument('--nworker', default=4, type=int, help='Number of workers')
parser.add_argument('--log', default=True, action='store_true', help='Whether to log')
parser.add_argument('--logpath', default='', help='The path to the output log file')
parser.add_argument('--gpu', default='1')
parser.add_argument('--annotation_ratio', default=0.01, type=float)

args = parser.parse_args()
print(args)

num_epochs = args.nepoch
need_log = args.log
BATCH_SIZE = args.batch
num_workers = args.nworker
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
datatype = args.datatype
annotation_ratio = args.annotation_ratio


def main():
    start_epoch = 1
    # Whether to log the training information
    if need_log:
        logger_root = args.logpath if args.logpath != '' else 'logs'
        time_stamp = time.strftime("%Y-%m-%d_%H-%M-%S")

        if args.resume == '':
            model_save_path = check_folder(logger_root)
            model_save_path = check_folder(os.path.join(model_save_path, 'Stage2'))
            model_save_path = check_folder(os.path.join(model_save_path, time_stamp))

            log_file_name = os.path.join(model_save_path, 'log.txt')
            saver = open(log_file_name, "w")
            saver.write("GPU number: {}\n".format(torch.cuda.device_count()))
            saver.flush()

            # Logging the details for this experiment
            saver.write("command line: {}\n".format(" ".join(sys.argv[0:])))
            saver.write(args.__repr__() + "\n\n")
            saver.flush()
        else:
            model_save_path = args.resume
            log_file_name = os.path.join(model_save_path, 'log.txt')
            saver = open(log_file_name, "a")
            saver.write("GPU number: {}\n".format(torch.cuda.device_count()))
            saver.flush()

            # Logging the details for this experiment
            saver.write("command line: {}\n".format(" ".join(sys.argv[1:])))
            saver.write(args.__repr__() + "\n\n")
            saver.flush()

    # Specify gpu device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    device_num = torch.cuda.device_count()
    print("device number", device_num)

    voxel_size = (0.25, 0.25, 0.4)
    if datatype == 'nuScenes':
        area_extents = np.array([[-32., 32.], [-32., 32.], [-3., 2.]])
    elif datatype == 'Waymo':
        area_extents = np.array([[-32., 32.], [-32., 32.], [-1., 4.]])

    tmp = args.motiondata
    trainset_split = tmp.split('/')[-1] if tmp.split('/')[-1] is not '' else tmp.split('/')[-2]
    if datatype == 'nuScenes':
""" Train WeakMotionNet in Stage2 Some of the code are modified based on 'train_single_seq.py' in MotionNet. Reference: MotionNet (https://www.merl.com/research/?research=license-request&sw=MotionNet) """ class AverageMeter(object): """Computes and stores the average and current value""" def __init__(self, name, fmt=':f'): self.name = name self.fmt = fmt self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count def __str__(self): fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})' return fmtstr.format(**self.__dict__) def check_folder(folder_path): if not os.path.exists(folder_path): os.mkdir(folder_path) return folder_path out_seq_len = 1 # The number of future frames we are going to predict height_feat_size = 13 # The size along the height dimension parser = argparse.ArgumentParser() parser.add_argument('-md', '--motiondata', default='/path_to/nuScenes/input-data/train/', type=str, help='The path to the preprocessed sparse BEV training data') parser.add_argument('-wd', '--weakdata', default='/path_to/nuScenes/weak-data/train/', type=str, help='The path to the preprocessed sparse BEV training data') parser.add_argument('-FBd', '--FBdata', default='/path_to/nuScenes/FGBG-data/nuscenes_seg_0-01/', type=str, help='The path to the preprocessed sparse BEV training data') parser.add_argument('--datatype', default='nuScenes', type=str, choices=['Waymo', 'nuScenes']) parser.add_argument('-t', '--evaldata', default='/path_to/nuScenes/input-data/val/', type=str, help='The path to the preprocessed sparse BEV training data') parser.add_argument('--resume', default='', type=str, help='The path to the saved model that is loaded to resume training') parser.add_argument('--batch', default=8, type=int, help='Batch size') parser.add_argument('--nepoch', default=60, type=int, help='Number of epochs') parser.add_argument('--nworker', default=4, type=int, help='Number of workers') parser.add_argument('--log', default=True, action='store_true', help='Whether to log') parser.add_argument('--logpath', default='', help='The path to the output log file') parser.add_argument('--gpu', default='1') parser.add_argument('--annotation_ratio', default=0.01, type=float) args = parser.parse_args() print(args) num_epochs = args.nepoch need_log = args.log BATCH_SIZE = args.batch num_workers = args.nworker os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu datatype = args.datatype annotation_ratio = args.annotation_ratio def main(): start_epoch = 1 # Whether to log the training information if need_log: logger_root = args.logpath if args.logpath != '' else 'logs' time_stamp = time.strftime("%Y-%m-%d_%H-%M-%S") if args.resume == '': model_save_path = check_folder(logger_root) model_save_path = check_folder(os.path.join(model_save_path, 'Stage2')) model_save_path = check_folder(os.path.join(model_save_path, time_stamp)) log_file_name = os.path.join(model_save_path, 'log.txt') saver = open(log_file_name, "w") saver.write("GPU number: {}\n".format(torch.cuda.device_count())) saver.flush() # Logging the details for this experiment saver.write("command line: {}\n".format(" ".join(sys.argv[0:]))) saver.write(args.__repr__() + "\n\n") saver.flush() else: model_save_path = args.resume log_file_name = os.path.join(model_save_path, 'log.txt') saver = open(log_file_name, "a") saver.write("GPU number: {}\n".format(torch.cuda.device_count())) saver.flush() # Logging the details for this experiment 
saver.write("command line: {}\n".format(" ".join(sys.argv[1:]))) saver.write(args.__repr__() + "\n\n") saver.flush() # Specify gpu device device = torch.device("cuda" if torch.cuda.is_available() else "cpu") device_num = torch.cuda.device_count() print("device number", device_num) voxel_size = (0.25, 0.25, 0.4) if datatype == 'nuScenes': area_extents = np.array([[-32., 32.], [-32., 32.], [-3., 2.]]) elif datatype == 'Waymo': area_extents = np.array([[-32., 32.], [-32., 32.], [-1., 4.]]) tmp = args.motiondata trainset_split = tmp.split('/')[-1] if tmp.split('/')[-1] is not '' else tmp.split('/')[-2] if datatype == 'nuScenes':
trainset = DatasetSingleSeq_Stage2(dataset_root=args.motiondata, weakdata_root=args.weakdata,
2
2023-11-12 07:03:29+00:00
16k
c3exchange/c3-smartcontracts-v1
contracts_unified/core/main.py
[ { "identifier": "update", "path": "contracts_unified/core/bare_calls/update.py", "snippet": "@Subroutine(TealType.none)\ndef update() -> Expr:\n \"\"\"Implements the contract method called on update\"\"\"\n\n return sender_is_creator()" }, { "identifier": "delete", "path": "contracts_unified/core/bare_calls/update.py", "snippet": "@Subroutine(TealType.none)\ndef delete() -> Expr:\n \"\"\"Implements the contract method called on delete\"\"\"\n\n return sender_is_creator()" }, { "identifier": "account_move", "path": "contracts_unified/core/methods/account_move.py", "snippet": "@ABIReturnSubroutine\ndef account_move(\n source_account: AccountAddress,\n user_op: OperationMetaData,\n delegation_chain: DelegationChain,\n _server_data: abi.DynamicBytes,\n opup_budget: Amount,\n) -> Expr:\n \"\"\"Moves funds between two accounts\n\n Arguments:\n\n source_account (AccountAddress): Source account address.\n user_op (OperationMetaData): Operation metadata containing destination account, cash and pool.\n _delegation_chain (DelegationChain): Delegation chain. Unused.\n _server_data (abi.DynamicBytes): Server data. Unused.\n opup_budget (Amount): Additional computation budget to allocate to this transaction.\n\n \"\"\"\n\n # Constants\n abi_false = abi.Bool()\n\n # Extracted operation data\n data = AccountMoveData()\n cash = abi.make(SignedInstrumentBasket)\n pool = abi.make(SignedInstrumentBasket)\n\n # Sender and receiver accounts\n destination_account = AccountAddress()\n\n # Health check\n health = ExcessMargin()\n\n i = abi.Uint64()\n length = abi.Uint64()\n abi_zero_int = abi.Uint64()\n\n return Seq(\n setup(opup_budget.get()),\n\n # Set constants\n abi_false.set(Int(0)),\n abi_zero_int.set(Int(0)),\n\n # Validate sender\n cast(Expr, sender_is_sig_validator()),\n\n # No delegation is allowed for account move\n Assert(delegation_chain.length() == Int(0)),\n\n # Get the source and destination accounts\n user_op.operation.use(lambda op_data:\n Seq(\n data.decode(op_data.get()),\n data.operation.use(lambda op: Assert(op.get() == OperationId.AccountMove)),\n data.destination_account.store_into(destination_account),\n data.cash.store_into(cash),\n data.pool.store_into(pool),\n )\n ),\n\n # Validate the source account is not the destination account\n Assert(source_account.get() != destination_account.get()),\n\n # Check the closer-to-zero condition for the pool basket\n cast(Expr, closer_to_zero(source_account, pool)),\n\n # Update both users to the current index\n length.set(pool.length()),\n For(i.set(Int(0)), i.get() < length.get(), i.set(i.get() + Int(1))).Do(\n pool[i.get()].use(lambda instrument_amount:\n instrument_amount.instrument.use(lambda instrument:\n Seq(\n cast(Expr, perform_pool_move(source_account, instrument, abi_zero_int)),\n cast(Expr, perform_pool_move(destination_account, instrument, abi_zero_int))\n )\n )\n )\n ),\n\n # Perform update\n cast(Expr, signed_account_move_baskets(source_account, destination_account, cash, pool, abi_false, abi_false)),\n\n # Check health\n # NOTE: No need to check old vs new because all account moves make health worse\n health.set(health_check(source_account, abi_false)),\n Assert(Not(signed_ltz(health.get()))),\n )" }, { "identifier": "clean_orders", "path": "contracts_unified/core/methods/clean_orders.py", "snippet": "@ABIReturnSubroutine\ndef clean_orders(\n orders: abi.DynamicArray[OrderData],\n) -> Expr:\n \"\"\"\n Clean any expired orders from the order book\n\n Arguments:\n\n orders: The orders to analyze.\n \"\"\"\n\n i = abi.Uint64()\n length 
= abi.Uint64()\n order_data = OrderData()\n order_id = abi.make(OrderId)\n\n return Seq(\n # Loop through all orders\n length.set(orders.length()),\n For(i.set(Int(0)), i.get() < length.get(), i.set(i.get() + Int(1))).Do(\n # Check if order is expired\n order_data.set(orders[i.get()]),\n order_data.expiration_time.use(lambda expires:\n If(Global.latest_timestamp() > expires.get())\n .Then(\n # Delete order\n order_id.set(cast(abi.ReturnedValue, OrderStateHandler.get_order_id(order_data))),\n cast(Expr, OrderStateHandler.delete_order_onchain(order_id)),\n )\n ),\n ),\n )" }, { "identifier": "create", "path": "contracts_unified/core/methods/create.py", "snippet": "@ABIReturnSubroutine\ndef create(\n pricecaster_id: EncodedAppId,\n wormhole_token_bridge_id: EncodedAppId,\n liquidation_factors: EncodedLiquidationFactors,\n withdraw_buffer_address: abi.Address,\n signature_validator_address: abi.Address,\n operator_address: abi.Address,\n quant_address: abi.Address,\n fee_target_address: abi.Address,\n opup_budget: Amount,\n) -> Expr:\n \"\"\"Implements the contract method called at creation time\"\"\"\n\n return Seq(\n # Generate budget for the call\n setup(opup_budget.get()),\n\n # Initialize global state\n GlobalStateHandler.set_init_timestamp(),\n GlobalStateHandler.set_instrument_count(Int(0)),\n GlobalStateHandler.set_pricecaster_id(pricecaster_id.get()),\n GlobalStateHandler.set_wormhole_bridge_id(wormhole_token_bridge_id.get()),\n GlobalStateHandler.set_liquidation_factors(liquidation_factors.get()),\n GlobalStateHandler.set_withdraw_buffer(withdraw_buffer_address.get()),\n GlobalStateHandler.set_signature_validator(signature_validator_address.get()),\n GlobalStateHandler.set_operator_address(operator_address.get()),\n GlobalStateHandler.set_quant_address(quant_address.get()),\n GlobalStateHandler.set_fee_target(fee_target_address.get()),\n )" }, { "identifier": "deposit", "path": "contracts_unified/core/methods/deposit.py", "snippet": "@ABIReturnSubroutine\ndef deposit(\n account: AccountAddress,\n deposit_txn: abi.Transaction,\n payload: DepositWord,\n instrument_id: InstrumentId,\n instant_pool_move: Amount,\n opup_budget: Amount,\n) -> Expr:\n \"\"\"Implements the standard Deposit contract method.\n\n Arguments:\n\n account (AccountAddress): Target account address to deposit to.\n deposit_txn (Transaction): The ABI \"Transaction-Type\" argument referencing the previous transaction to this call in the \"Standard Deposit\" group. 
Must be of type \"payment\" of \"asset transfer\".\n payload (DepositWord): Payload, must equal to \"Deposit\" string-literal.\n instrument_id (InstrumentId): Instrument to transfer.\n instant_pool_move (Amount): Optional amount to move to instant pool.\n opup_budget (Amount): Additional computation budget to allocate to this transaction.\n\n\"\"\"\n\n deposit_asset_id = abi.Uint64()\n deposit_amount = abi.Uint64()\n element = InstrumentListElement()\n\n return Seq(\n # Generate budget for deposit\n setup(opup_budget.get()),\n\n # Validate deposit transaction\n Assert(\n And(\n # We don't really need to check rekey_to field,\n # but it's still good for us if we don't have to support unintended use cases.\n deposit_txn.get().rekey_to() == Global.zero_address(),\n deposit_txn.get().asset_close_to() == Global.zero_address(),\n )\n ),\n\n # Get deposit info from transaction\n Cond(\n [deposit_txn.get().type_enum() == TxnType.AssetTransfer, Seq(\n Assert(deposit_txn.get().asset_receiver() == Global.current_application_address()),\n deposit_asset_id.set(deposit_txn.get().xfer_asset()),\n deposit_amount.set(deposit_txn.get().asset_amount()),\n )],\n [deposit_txn.get().type_enum() == TxnType.Payment, Seq(\n Assert(deposit_txn.get().receiver() == Global.current_application_address()),\n deposit_asset_id.set(Int(0)),\n deposit_amount.set(deposit_txn.get().amount()),\n )],\n ),\n\n # Validate deposit asset is given instrument ID\n element.set(cast(abi.ReturnedValue, GlobalStateHandler.get_instrument(instrument_id))),\n element.asset_id.use(lambda asset_id: Assert(deposit_asset_id.get() == asset_id.get())),\n\n # Perform deposit\n cast(Expr, inner_deposit_asset(account, payload, instrument_id, deposit_amount, instant_pool_move)),\n )" }, { "identifier": "fund_mbr", "path": "contracts_unified/core/methods/fund_mbr.py", "snippet": "@ABIReturnSubroutine\ndef fund_mbr(\n payment_txn: abi.PaymentTransaction,\n) -> Expr:\n \"\"\"Register payment in algos for the MBR fund of the contract\n\n Arguments:\n\n payment_txn: The payment transaction that will fund this contract\"\"\"\n\n return Seq(\n Assert(payment_txn.get().receiver() == Global.current_application_address()),\n GlobalStateHandler.add_mbr_fund(payment_txn.get().amount())\n )" }, { "identifier": "liquidate", "path": "contracts_unified/core/methods/liquidate.py", "snippet": "@ABIReturnSubroutine\ndef liquidate(\n liquidator_account: AccountAddress,\n user_op: OperationMetaData,\n _delegation_chain: DelegationChain,\n _server_data: abi.DynamicBytes,\n opup_budget: Amount,\n) -> Expr:\n \"\"\"Performs liquidation of a user's position\"\"\"\n\n # Constants\n abi_false = abi.Bool()\n abi_true = abi.Bool()\n abi_zero = Ratio()\n\n # Liquidation data\n data = LiquidationData()\n\n liquidatee_account = AccountAddress()\n liquidatee_maint_health = ExcessMargin()\n\n cash = abi.make(SignedInstrumentBasket)\n pool = abi.make(SignedInstrumentBasket)\n\n liquidator_health = ExcessMargin()\n\n factors = LiquidationFactors()\n cash_factor = Ratio()\n pool_factor = Ratio()\n\n cash_take_value = Price()\n pool_take_value = Price()\n pool_give_value = Price()\n\n alpha_numerator = ExcessMargin()\n alpha_denominator = ExcessMargin()\n\n return Seq(\n setup(opup_budget.get()),\n\n # Set constants\n abi_false.set(Int(0)),\n abi_true.set(Int(1)),\n abi_zero.set(Int(0)),\n\n # Validate sender is a user proxy\n cast(Expr, sender_is_sig_validator()),\n\n # Extract liquidation data\n user_op.operation.use(lambda op_data:\n Seq(\n data.decode(op_data.get()),\n 
data.operation.use(lambda op: Assert(op.get() == OperationId.Liquidate)),\n data.liquidatee.store_into(liquidatee_account),\n data.cash.store_into(cash),\n data.pool.store_into(pool),\n )\n ),\n\n # Validate liquidatee is not liquidator\n Assert(liquidatee_account.get() != liquidator_account.get()),\n\n # Validate liquidatee is liquidatable\n liquidatee_maint_health.set(health_check(liquidatee_account, abi_true)),\n Assert(signed_ltz(liquidatee_maint_health.get())),\n\n # Perform netting on liquidatee account\n cast(Expr, perform_netting(liquidatee_account, liquidator_account)),\n\n # Get global constants\n factors.decode(GlobalStateHandler.get_liquidation_factors()),\n cash_factor.set(factors.cash_liquidation_factor),\n pool_factor.set(factors.pool_liquidation_factor),\n\n # Check the closer-to-zero condition for the pool basket\n cast(Expr, closer_to_zero(liquidatee_account, pool)),\n\n # Calculate basket values\n # NOTE: The cash_take_value and pool_give_value use the cash_factor, where as the pool_take_value uses the pool_factor\n # See the formulas from the design doc for more info.\n cash_take_value.set(calculate_basket_value(cash, abi_false, cash_factor, abi_true, abi_true, abi_false)),\n pool_take_value.set(calculate_basket_value(pool, abi_false, pool_factor, abi_true, abi_true, abi_false)),\n pool_give_value.set(calculate_basket_value(pool, abi_true, cash_factor, abi_true, abi_false, abi_false)),\n\n # Check inequality is satisfied\n Assert(cash_take_value.get() + pool_take_value.get() <= pool_give_value.get()),\n\n # Ensure fairness by calculating alpha and scaling the baskets\n # alpha = health(initial) / (initial_haircut * take_assets * price + initial_haircut * (1 - opt_util) * take_liabilities * price - (1 + initial_margin) * give_liabilities * price)\n # NOTE: health_check sets up the local state handler for itself, so we don't need to\n # NOTE: Reusing the above variables for the values used when calculating the denominator\n alpha_numerator.set(health_check(liquidatee_account, abi_false)),\n cash_take_value.set(calculate_basket_value(cash, abi_false, abi_zero, abi_false, abi_false, abi_false)),\n pool_take_value.set(calculate_basket_value(pool, abi_false, abi_zero, abi_false, abi_false, abi_true)),\n pool_give_value.set(calculate_basket_value(pool, abi_true, abi_zero, abi_false, abi_true, abi_false)),\n alpha_denominator.set(pool_give_value.get() - (cash_take_value.get() + pool_take_value.get())),\n\n # Clamp alpha to be between 0 and 1\n alpha_numerator.set(signed_abs(alpha_numerator.get())),\n\n If(alpha_numerator.get() > alpha_denominator.get())\n .Then(alpha_numerator.set(alpha_denominator.get())),\n\n # Scale the basket values to be fair\n cash.set(cast(abi.ReturnedValue, scale_basket(cash, alpha_numerator, alpha_denominator))),\n pool.set(cast(abi.ReturnedValue, scale_basket(pool, alpha_numerator, alpha_denominator))),\n\n # Perform liquidation swaps, all relevant glboal indexes are updated after netting\n cast(Expr, signed_account_move_baskets(liquidatee_account, liquidator_account, cash, pool, abi_false, abi_true)),\n\n # Verify liquidator is still healthy\n # NOTE: Liquidator must always be in the green after liquidation\n # NOTE: Liquidatee will always be healthier by design\n liquidator_health.set(health_check(liquidator_account, abi_false)),\n Assert(Not(signed_ltz(liquidator_health.get()))),\n )" }, { "identifier": "pool_move", "path": "contracts_unified/core/methods/pool_move.py", "snippet": "@ABIReturnSubroutine\ndef pool_move(\n account: AccountAddress,\n 
user_op: OperationMetaData,\n _delegation_chain: DelegationChain,\n _server_data: abi.DynamicBytes,\n opup_budget: Amount,\n) -> Expr:\n \"\"\"Transfers instruments from user's address to the pool\n\n Arguments:\n\n account (AccountAddress): User's account address.\n user_op (OperationMetaData): Operation metadata containing a basket of instruments.\n _delegation_chain (DelegationChain): Delegation chain. Unused.\n _server_data (abi.DynamicBytes): Server data. Unused.\n opup_budget (Amount): Additional computation budget to allocate to this transaction.\n\n \"\"\"\n\n abi_false = abi.Bool()\n\n user_old_health = ExcessMargin()\n user_health = ExcessMargin()\n\n data = PoolMoveData()\n instrument = InstrumentId()\n amount = SignedAmount()\n\n user_data = UserInstrumentData()\n price = Price()\n cash = Amount()\n neg_cash = SignedAmount()\n\n return Seq(\n setup(opup_budget.get()),\n\n # Load constants\n abi_false.set(Int(0)),\n\n # Validate sender is a user proxy\n cast(Expr, sender_is_sig_validator()),\n\n # Get basket from user_op.data\n user_op.operation.use(lambda op_data:\n Seq(\n data.decode(op_data.get()),\n data.operation.use(lambda op: Assert(op.get() == OperationId.PoolMove)),\n instrument.set(data.instrument),\n amount.set(data.amount),\n )\n ),\n\n # Get old health\n user_old_health.set(cast(abi.ReturnedValue, health_check(account, abi_false))),\n\n # Move funds\n cast(Expr, perform_pool_move(account, instrument, amount)),\n\n # When there is a negative movement, we need to check that the user can support itself without netting\n If(signed_ltz(amount.get())).Then(\n # Get instrument price\n price.set(cast(abi.ReturnedValue, get_normalized_price(instrument))),\n # Extract user cash\n user_data.set(cast(abi.ReturnedValue, LocalStateHandler.get_position(account, instrument))),\n cash.set(user_data.cash),\n neg_cash.set(signed_neg(cash.get())),\n # Remove all user cash temporarily\n cast(Expr, signed_add_to_cash(account, instrument, neg_cash)),\n # Recalculate health without netting the borrowed asset, ensure it is positive\n user_health.set(cast(abi.ReturnedValue, health_check(account, abi_false))),\n user_health.set(signed_add(user_health.get(), WideRatio([price.get(), cash.get()], [Int(PRICECASTER_RESCALE_FACTOR)]))),\n Assert(Not(signed_ltz(user_health.get()))),\n # Add all the cash back\n cast(Expr, signed_add_to_cash(account, instrument, cash)),\n ),\n\n # Validate user is still healthy\n user_health.set(cast(abi.ReturnedValue, health_check(account, abi_false))),\n Assert(Or(Not(signed_ltz(user_health.get())), signed_gte(user_health.get(), user_old_health.get()))),\n )" }, { "identifier": "portal_transfer", "path": "contracts_unified/core/methods/portal_transfer.py", "snippet": "@ABIReturnSubroutine\ndef portal_transfer(vaa: abi.DynamicBytes, *, output: abi.DynamicBytes) -> Expr:\n \"\"\"\n\n Called at the end of a transfer from the portal to C3 and\n use as a \"marker\" and VAA source for the deposit operation.\n\n Decoding and validation of the VAA, along with sender check is performed\n in the deposit operation, where this txn is referenced.\n\n \"\"\"\n\n return Seq(\n Assert(Len(vaa.get()) != Int(0), comment=\"Empty VAA\"),\n # Anything works here, since wormhole requires some value\n output.set(Bytes(\"base16\", \"0x00\")),\n )" }, { "identifier": "settle", "path": "contracts_unified/core/methods/settle.py", "snippet": "@ABIReturnSubroutine\ndef settle(\n add_order_txn: abi.ApplicationCallTransaction,\n buy_account: AccountAddress,\n user_op: OperationMetaData,\n 
_delegation_chain: DelegationChain,\n server_args: SettleExtraData,\n opup_budget: Amount,\n) -> Expr:\n \"\"\"Settles two orders\n\n Arguments:\n\n add_order_txn (ApplicationCallTransaction): The previous add_order transaction in this group that added the sell order to the order book.\n buy_account (AccountAddress): The buyer user's account address.\n user_op (OperationMetaData): Operation metadata containing buyer order data.\n _delegation_chain (DelegationChain): Delegation chain. Unused.\n server_args (SettleExtraData): Extra data for the settle operation.\n opup_budget (Amount): Additional computation budget to allocate to this transaction.\n\n \"\"\"\n\n abi_false = abi.Bool()\n add_order_op = OperationMetaData()\n add_order_data = abi.make(abi.DynamicBytes)\n\n buy_order = OrderData()\n sell_order = OrderData()\n\n sell_account = AccountAddress()\n\n buy_order_id = abi.make(OrderId)\n sell_order_id = abi.make(OrderId)\n\n buy_order_onchain = OnChainOrderData()\n sell_order_onchain = OnChainOrderData()\n\n # Amounts for each order's buy/sell side\n buyer_sell_amount = Amount()\n buyer_buy_amount = Amount()\n seller_sell_amount = Amount()\n seller_buy_amount = Amount()\n\n # Remaining amounts for each order's buy/sell side\n buyer_sell_remaining = Amount()\n buyer_borrow_remaining = Amount()\n buyer_repay_remaining = Amount()\n\n seller_sell_remaining = Amount()\n seller_borrow_remaining = Amount()\n seller_repay_remaining = Amount()\n\n # New remaining amounts for each order's buy/sell side\n buyer_new_sell_remaining = Amount()\n buyer_new_borrow_remaining = Amount()\n buyer_new_repay_remaining = Amount()\n\n seller_new_sell_remaining = Amount()\n seller_new_borrow_remaining = Amount()\n seller_new_repay_remaining = Amount()\n\n buyer_new_order_onchain = OnChainOrderData()\n seller_new_order_onchain = OnChainOrderData()\n\n buyer_buy_instrument = InstrumentId()\n buyer_sell_instrument = InstrumentId()\n seller_buy_instrument = InstrumentId()\n seller_sell_instrument = InstrumentId()\n\n buyer_to_send = Amount()\n seller_to_send = Amount()\n\n buyer_to_borrow = Amount()\n seller_to_borrow = Amount()\n buyer_to_repay = Amount()\n seller_to_repay = Amount()\n\n buyer_buy_delta = Amount()\n seller_buy_delta = Amount()\n buyer_sell_delta = Amount()\n seller_sell_delta = Amount()\n\n neg_borrow = SignedAmount()\n\n buyer_fees = Amount()\n seller_fees = Amount()\n\n buyer_old_health = ExcessMargin()\n buyer_health = ExcessMargin()\n seller_old_health = ExcessMargin()\n seller_health = ExcessMargin()\n\n buyer_negative_margin = Boolean()\n seller_negative_margin = Boolean()\n\n return Seq(\n setup(opup_budget.get()),\n\n # Set constants\n abi_false.set(Int(0)),\n\n # Validate sender is a user proxy\n cast(Expr, sender_is_sig_validator()),\n\n # Extract the buy order\n user_op.operation.use(lambda op_data:\n Seq(\n buy_order.decode(op_data.get()),\n buy_order.operation.use(lambda op: Assert(op.get() == OperationId.Settle)),\n buy_order.account.use(lambda acc: Assert(acc.get() == buy_account.get())),\n )\n ),\n\n # Add the order to the order book\n cast(Expr, OrderStateHandler.add_order(buy_order)),\n\n # Validate the sell order\n Assert(add_order_txn.get().application_id() == Global.current_application_id()),\n Assert(add_order_txn.get().on_completion() == OnComplete.NoOp),\n Assert(add_order_txn.get().application_args.length() == ADD_ORDER_ARG_COUNT),\n Assert(add_order_txn.get().application_args[ARG_INDEX_SELECTOR] == ADD_ORDER_SIG),\n\n # Get the sell order\n 
sell_account.decode(add_order_txn.get().application_args[ARG_INDEX_ACCOUNT]),\n add_order_op.decode(add_order_txn.get().application_args[ARG_INDEX_OP]),\n add_order_op.operation.store_into(add_order_data),\n sell_order.decode(add_order_data.get()),\n\n # Get order IDs\n buy_order_id.set(OrderStateHandler.get_order_id(buy_order)),\n sell_order_id.set(OrderStateHandler.get_order_id(sell_order)),\n\n # Get on chain order data\n buy_order_onchain.set(cast(abi.ReturnedValue, OrderStateHandler.get_order_onchain(buy_order_id))),\n sell_order_onchain.set(cast(abi.ReturnedValue, OrderStateHandler.get_order_onchain(sell_order_id))),\n\n # Validate the asset pair matches\n buy_order.sell_instrument.store_into(buyer_sell_instrument),\n buy_order.buy_instrument.store_into(buyer_buy_instrument),\n sell_order.sell_instrument.store_into(seller_sell_instrument),\n sell_order.buy_instrument.store_into(seller_buy_instrument),\n\n Assert(buyer_sell_instrument.get() == seller_buy_instrument.get()),\n Assert(buyer_buy_instrument.get() == seller_sell_instrument.get()),\n\n # Validate the orders are not expired\n buy_order.expiration_time.use(lambda expiration_time:\n Assert(expiration_time.get() > Global.latest_timestamp())\n ),\n sell_order.expiration_time.use(lambda expiration_time:\n Assert(expiration_time.get() > Global.latest_timestamp())\n ),\n\n # Validate the orders match\n buyer_sell_amount.set(buy_order.sell_amount),\n buyer_buy_amount.set(buy_order.buy_amount),\n seller_sell_amount.set(sell_order.sell_amount),\n seller_buy_amount.set(sell_order.buy_amount),\n\n Assert(\n BytesGe(\n BytesMul(Itob(buyer_sell_amount.get()), Itob(seller_sell_amount.get())),\n BytesMul(Itob(buyer_buy_amount.get()), Itob(seller_buy_amount.get()))\n )\n ),\n\n # Validate that the swap is fair for both the seller and the buyer\n buyer_to_send.set(server_args.buyer_to_send),\n seller_to_send.set(server_args.seller_to_send),\n\n Assert(\n BytesGe(\n BytesMul(Itob(buyer_to_send.get()), Itob(seller_sell_amount.get())),\n BytesMul(Itob(seller_to_send.get()), Itob(seller_buy_amount.get()))\n )\n ),\n\n Assert(\n BytesGe(\n BytesMul(Itob(seller_to_send.get()), Itob(buyer_sell_amount.get())),\n BytesMul(Itob(buyer_to_send.get()), Itob(buyer_buy_amount.get()))\n )\n ),\n\n # Validate that we are not sending more than allowed\n buyer_sell_remaining.set(buy_order_onchain.sell_remaining),\n Assert(buyer_sell_remaining.get() >= buyer_to_send.get()),\n seller_sell_remaining.set(sell_order_onchain.sell_remaining),\n Assert(seller_sell_remaining.get() >= seller_to_send.get()),\n\n # Validate that we are not borrowing more thn allowed\n buyer_borrow_remaining.set(buy_order_onchain.borrow_remaining),\n buyer_to_borrow.set(server_args.buyer_to_borrow),\n Assert(buyer_borrow_remaining.get() >= buyer_to_borrow.get()),\n\n seller_borrow_remaining.set(sell_order_onchain.borrow_remaining),\n seller_to_borrow.set(server_args.seller_to_borrow),\n Assert(seller_borrow_remaining.get() >= seller_to_borrow.get()),\n\n # Validate that we are not repaying more than allowed\n buyer_repay_remaining.set(buy_order_onchain.repay_remaining),\n buyer_to_repay.set(server_args.buyer_to_repay),\n Assert(buyer_repay_remaining.get() >= buyer_to_repay.get()),\n\n seller_repay_remaining.set(sell_order_onchain.repay_remaining),\n seller_to_repay.set(server_args.seller_to_repay),\n Assert(seller_repay_remaining.get() >= seller_to_repay.get()),\n\n # Validate that the fees are lower than the maximum possible\n buyer_fees.set(server_args.buyer_fees),\n 
seller_fees.set(server_args.seller_fees),\n Assert(buyer_fees.get() <= (buyer_to_send.get() / MAX_FEES_DIVISOR)),\n Assert(seller_fees.get() <= (buyer_to_send.get() / MAX_FEES_DIVISOR)),\n\n # We shouldn't borrow / repay more than the assets traded, including fees.\n Assert(buyer_to_borrow.get() <= buyer_to_send.get() + buyer_fees.get()),\n Assert(buyer_to_repay.get() <= seller_to_send.get()),\n Assert(seller_to_borrow.get() <= seller_to_send.get()),\n Assert(seller_to_repay.get() <= buyer_to_send.get() - seller_fees.get()),\n\n # Generate the updated order book for the buy order\n buyer_new_sell_remaining.set(buyer_sell_remaining.get() - buyer_to_send.get()),\n buyer_new_borrow_remaining.set(buyer_borrow_remaining.get() - buyer_to_borrow.get()),\n buyer_new_repay_remaining.set(buyer_repay_remaining.get() - buyer_to_repay.get()),\n buyer_new_order_onchain.set(buyer_new_sell_remaining, buyer_new_borrow_remaining, buyer_new_repay_remaining),\n\n # Generate the updated order book for the sell order\n seller_new_sell_remaining.set(seller_sell_remaining.get() - seller_to_send.get()),\n seller_new_borrow_remaining.set(seller_borrow_remaining.get() - seller_to_borrow.get()),\n seller_new_repay_remaining.set(seller_repay_remaining.get() - seller_to_repay.get()),\n seller_new_order_onchain.set(seller_new_sell_remaining, seller_new_borrow_remaining, seller_new_repay_remaining),\n\n # Calculate the swap amounts\n buyer_buy_delta.set(seller_to_send.get()),\n seller_buy_delta.set(buyer_to_send.get() - seller_fees.get()),\n buyer_sell_delta.set(signed_neg(buyer_to_send.get() + buyer_fees.get())),\n seller_sell_delta.set(signed_neg(seller_to_send.get())),\n\n # Update the on chain order data\n OrderStateHandler.set_order_onchain(buy_order_id, buyer_new_order_onchain),\n OrderStateHandler.set_order_onchain(sell_order_id, seller_new_order_onchain),\n\n # Get old health for both users if needed\n buyer_negative_margin.set(server_args.buyer_negative_margin),\n seller_negative_margin.set(server_args.seller_negative_margin),\n\n If(buyer_negative_margin.get()).Then(\n buyer_old_health.set(cast(abi.ReturnedValue, health_check(buy_account, abi_false))),\n ),\n\n If(seller_negative_margin.get()).Then(\n seller_old_health.set(cast(abi.ReturnedValue, health_check(sell_account, abi_false))),\n ),\n\n # Handle borrow updates\n If(buyer_to_borrow.get() > Int(0)).Then(\n neg_borrow.set(signed_neg(buyer_to_borrow.get())),\n cast(Expr, perform_pool_move(buy_account, buyer_sell_instrument, neg_borrow)),\n ),\n If(seller_to_borrow.get() > Int(0)).Then(\n neg_borrow.set(signed_neg(seller_to_borrow.get())),\n cast(Expr, perform_pool_move(sell_account, seller_sell_instrument, neg_borrow)),\n ),\n\n # Perform swap updates\n cast(Expr, signed_add_to_cash(buy_account, buyer_buy_instrument, buyer_buy_delta)),\n cast(Expr, signed_add_to_cash(sell_account, seller_buy_instrument, seller_buy_delta)),\n cast(Expr, signed_add_to_cash(buy_account, buyer_sell_instrument, buyer_sell_delta)),\n cast(Expr, signed_add_to_cash(sell_account, seller_sell_instrument, seller_sell_delta)),\n\n # Collect the fees\n cast(Expr, collect_fees(buyer_sell_instrument, buyer_fees)),\n cast(Expr, collect_fees(seller_buy_instrument, seller_fees)),\n\n # Handle repay updates\n If(buyer_to_repay.get() > Int(0)).Then(\n cast(Expr, perform_pool_move(buy_account, buyer_buy_instrument, buyer_to_repay)),\n ),\n If(seller_to_repay.get() > Int(0)).Then(\n cast(Expr, perform_pool_move(sell_account, seller_buy_instrument, seller_to_repay)),\n ),\n\n # Validate the 
users are still healthy\n buyer_health.set(cast(abi.ReturnedValue, health_check(buy_account, abi_false))),\n Assert(Or(Not(signed_ltz(buyer_health.get())), And(buyer_negative_margin.get(), signed_gte(buyer_health.get(), buyer_old_health.get())))),\n seller_health.set(cast(abi.ReturnedValue, health_check(sell_account, abi_false))),\n Assert(Or(Not(signed_ltz(seller_health.get())), And(seller_negative_margin.get(), signed_gte(seller_health.get(), seller_old_health.get())))),\n )" }, { "identifier": "add_order", "path": "contracts_unified/core/methods/settle.py", "snippet": "@ABIReturnSubroutine\ndef add_order(\n # NOTE: Any update on this function must update ADD_ORDER_SIG and ADD_ORDER_ARG_COUNT above\n account: AccountAddress,\n user_op: OperationMetaData,\n _delegation_chain: DelegationChain,\n opup_budget: Amount,\n) -> Expr:\n\n \"\"\"Adds an order to the order book\n\n Arguments:\n\n account (AccountAddress): User's account address.\n user_op (OperationMetaData): Operation metadata containing order data.\n _delegation_chain (DelegationChain): Delegation chain. Unused.\n opup_budget (Amount): Additional computation budget to allocate to this transaction.\n\n \"\"\"\n\n order = OrderData()\n\n return Seq(\n setup(opup_budget.get()),\n\n # Validate signature validator' call\n cast(Expr, sender_is_sig_validator()),\n\n # Get order from user_op.data\n user_op.operation.use(lambda op_data:\n Seq(\n order.decode(op_data.get()),\n order.operation.use(lambda op: Assert(op.get() == OperationId.Settle)),\n order.account.use(lambda acc: Assert(acc.get() == account.get()))\n )\n ),\n\n # Add order to the order book\n cast(Expr, OrderStateHandler.add_order(order))\n )" }, { "identifier": "update_instrument", "path": "contracts_unified/core/methods/update_instrument.py", "snippet": "@ABIReturnSubroutine\ndef update_instrument(\n info: UpdateInstrumentInfo,\n opup_budget: Amount,\n) -> Expr:\n \"\"\"Implements the method that adds an instrument to the Core contract storage box.\n\n Arguments:\n\n info (UpdateInstrumentInfo): Instrument information to add or update.\n opup_budget (Amount): Additional computation budget to allocate to this transaction.\n\n \"\"\"\n\n abi_zero = abi.Uint64()\n abi_rate_one = abi.Uint64()\n abi_zero_address = abi.Address()\n\n timestamp = RelativeTimestamp()\n\n asset_id = AssetId()\n initial_haircut = Ratio()\n initial_margin = Ratio()\n maintenance_haircut = Ratio()\n maintenance_margin = Ratio()\n optimal_utilization = Ratio()\n min_rate = InterestRate()\n opt_rate = InterestRate()\n max_rate = InterestRate()\n borrow_index = abi.Uint64()\n lend_index = abi.Uint64()\n borrowed = Amount()\n liquidity = Amount()\n entry = InstrumentListElement()\n\n instrument_id = InstrumentId()\n instrument_count = abi.Uint64()\n\n return Seq(\n setup(opup_budget.get()),\n\n # Validate sender\n Assert(Txn.sender() == GlobalStateHandler.get_quant_address()),\n\n # Initialize the instrument box first if it doesn't exist\n cast(Expr, GlobalStateHandler.initialize()),\n\n # Get init time\n timestamp.set(GlobalStateHandler.get_relative_timestamp()),\n\n # Create the instrument list element\n abi_zero.set(Int(0)),\n abi_rate_one.set(RATE_ONE),\n abi_zero_address.set(Global.zero_address()),\n\n # Extract fields from info\n asset_id.set(info.asset_id),\n initial_haircut.set(info.initial_haircut),\n initial_margin.set(info.initial_margin),\n maintenance_haircut.set(info.maintenance_haircut),\n maintenance_margin.set(info.maintenance_margin),\n 
optimal_utilization.set(info.optimal_utilization),\n min_rate.set(info.min_rate),\n opt_rate.set(info.opt_rate),\n max_rate.set(info.max_rate),\n\n # Load the current instrument count and validate it\n instrument_id.set(info.instrument_id),\n instrument_count.set(GlobalStateHandler.get_instrument_count()),\n Assert(instrument_id.get() <= instrument_count.get()),\n\n # Validate instrument zero is always algo\n If(instrument_id.get() == Int(0))\n .Then(Assert(asset_id.get() == Int(0))),\n\n # Check for new entry vs old entry\n If(instrument_id.get() == instrument_count.get())\n .Then(\n # Perform optin to asset if needed\n If(asset_id.get() != Int(0), cast(Expr, inner_asset_opt_in(asset_id))),\n\n # Create the new entry\n borrow_index.set(abi_rate_one),\n lend_index.set(abi_rate_one),\n borrowed.set(abi_zero),\n liquidity.set(abi_zero),\n\n # Increase the instrument count\n GlobalStateHandler.set_instrument_count(instrument_count.get() + Int(1)),\n )\n .Else(\n # Not a new instrument, we need to accrue the interest\n cast(Expr, perform_pool_move(abi_zero_address, instrument_id, abi_zero)),\n # Retain the accrued interest values for the new entry\n entry.set(cast(abi.ReturnedValue, GlobalStateHandler.get_instrument(instrument_id))),\n # NOTE: The timestamp should be the same as the one for a new instrument\n entry.borrow_index.store_into(borrow_index),\n entry.lend_index.store_into(lend_index),\n entry.borrowed.store_into(borrowed),\n entry.liquidity.store_into(liquidity),\n ),\n\n # Create the new entry\n entry.set(\n asset_id,\n initial_haircut,\n initial_margin,\n maintenance_haircut,\n maintenance_margin,\n timestamp,\n borrow_index,\n lend_index,\n optimal_utilization,\n min_rate,\n opt_rate,\n max_rate,\n borrowed,\n liquidity,\n ),\n\n # Perform update/insert for entry\n GlobalStateHandler.set_instrument(instrument_id, entry),\n\n # Ensure we have enough funds for mbr\n cast(Expr, GlobalStateHandler.ensure_mbr_fund()),\n )" }, { "identifier": "update_parameter", "path": "contracts_unified/core/methods/update_parameter.py", "snippet": "@ABIReturnSubroutine\ndef update_parameter(\n key_to_update: abi.DynamicBytes,\n updated_value: abi.DynamicBytes,\n) -> Expr:\n \"\"\"Implements the method that changes a global parameter of the contract.\n\n Arguments:\n\n key_to_update (abi.DynamicBytes): Key of the parameter to update\n updated_value (abi.DynamicBytes): New value of the parameter\n\n \"\"\"\n\n key = ScratchVar(TealType.bytes)\n value = ScratchVar(TealType.bytes)\n\n return Seq(\n key.store(key_to_update.get()),\n value.store(updated_value.get()),\n If(key.load() == KEY_LIQUIDATION_FACTORS).Then(\n Assert(GlobalStateHandler.get_quant_address() == Txn.sender()),\n GlobalStateHandler.set_liquidation_factors(value.load())\n ).Else(\n Assert(Global.creator_address() == Txn.sender()),\n Cond(\n [key.load() == KEY_PRICECASTER_ID, GlobalStateHandler.set_pricecaster_id(value.load())],\n [key.load() == KEY_WORMHOLE_BRIDGE_ID, GlobalStateHandler.set_wormhole_bridge_id(value.load())],\n [key.load() == KEY_SIGNATURE_VALIDATOR, GlobalStateHandler.set_signature_validator(value.load())],\n [key.load() == KEY_QUANT_ADDRESS, GlobalStateHandler.set_quant_address(value.load())],\n [key.load() == KEY_FEE_TARGET, GlobalStateHandler.set_fee_target(value.load())],\n [key.load() == KEY_WITHDRAW_BUFFER, GlobalStateHandler.set_withdraw_buffer(value.load())],\n [key.load() == KEY_OPERATOR_ADDRESS, GlobalStateHandler.set_operator_address(value.load())],\n )\n )\n )" }, { "identifier": "withdraw", "path": 
"contracts_unified/core/methods/withdraw.py", "snippet": "@ABIReturnSubroutine\ndef withdraw(\n account: AccountAddress,\n user_op: OperationMetaData,\n delegation_chain: DelegationChain,\n server_params: WithdrawExtraData,\n opup_budget: Amount,\n) -> Expr:\n \"\"\"Withdraws funds from a user and sends them to a given Wormhole or Algorand address, depending on target chain\n\n Args:\n\n account (AccountAddress): The user account address.\n user_op (OperationMetaData): The user operation metadata. This contains signed withdraw data: instrument, amount, receiver, and maximum amount to borrow.\n delegation_chain (DelegationChain): The delegation chain. For withdraw operations this must be empty.\n server_params (abi.Uint64): The server parameters. For withdraw, this parameter just contains server' own balance.\n opup_budget (Amount): Additional computation budget for the operation.\n\n \"\"\"\n\n # Holds the withdraw buffer address\n wormhole_withdraw_buffer = abi.Address()\n\n # Constants\n abi_false = abi.Bool()\n\n # Holds extracted withdraw data from the user_op\n withdraw_data = WithdrawData()\n\n # Holds extracted withdraw data from the user_op\n instrument_id = InstrumentId()\n amount = Amount()\n receiver = WormholeAddress()\n max_borrow = Amount()\n amount_to_deduct = SignedAmount()\n amount_to_withdraw = SignedAmount()\n amount_to_borrow = SignedAmount()\n max_fees = Amount()\n\n # User balance, to calculate the cash/pool split of the withdrawal\n position = UserInstrumentData()\n balance = Amount()\n\n # Fees to be collected\n withdraw_fee = Amount()\n\n # Used to validate the user's health\n user_health = abi.Uint64()\n\n return Seq(\n setup(opup_budget.get()),\n\n # Load constants\n abi_false.set(Int(0)),\n\n # Validate sender is a user proxy\n cast(Expr, sender_is_sig_validator()),\n\n # No delegation is allowed for withdraw\n Assert(delegation_chain.length() == Int(0)),\n\n # Decode and extract withdraw operation\n user_op.operation.use(lambda op_data:\n Seq(\n withdraw_data.decode(op_data.get()),\n withdraw_data.operation.use(lambda op: Assert(op.get() == OperationId.Withdraw)),\n withdraw_data.instrument.store_into(instrument_id),\n withdraw_data.amount.store_into(amount),\n withdraw_data.receiver.store_into(receiver),\n withdraw_data.max_borrow.store_into(max_borrow),\n withdraw_data.max_fees.store_into(max_fees),\n )\n ),\n\n # Calculate cash and pool withdrawal amounts\n position.set(cast(abi.ReturnedValue, LocalStateHandler.get_position(account, instrument_id))),\n balance.set(position.cash),\n server_params.locked_cash.use(lambda locked_cash:\n balance.set(balance.get() - locked_cash.get()),\n ),\n\n # Get the fees\n withdraw_fee.set(server_params.withdraw_fee),\n\n # Do not exceed maximum fee limit specified in request.\n Assert(withdraw_fee.get() <= max_fees.get()),\n\n # Validate the user is not borrowing more than they have allowed\n Assert(amount.get() <= max_borrow.get() + balance.get()),\n\n # Calculate withdrawal amounts\n If(amount.get() > balance.get())\n .Then(\n amount_to_borrow.set(signed_neg(amount.get() - balance.get())),\n )\n .Else(\n amount_to_borrow.set(Int(0)),\n ),\n # This is the delta value to apply to the user cash\n amount_to_deduct.set(signed_neg(amount.get())),\n # This is the amount the user will actually get, implicitly fails if fees are bigger than the amount\n amount_to_withdraw.set(amount.get() - withdraw_fee.get()),\n\n # Borrow if needed\n If(amount_to_borrow.get() != Int(0))\n .Then(cast(Expr, perform_pool_move(account, instrument_id, 
amount_to_borrow))),\n\n # Remove assets\n cast(Expr, signed_add_to_cash(account, instrument_id, amount_to_deduct)),\n\n # Pay fees\n cast(Expr, collect_fees(instrument_id, withdraw_fee)),\n\n # Validate user is still healthy\n # NOTE: Withdraw always makes the user less healthy, so we don't need to check\n # the user's health before the withdrawal\n user_health.set(health_check(account, abi_false)),\n Assert(Not(signed_ltz(user_health.get()))),\n\n # Now that assets/liabilities are up to date, send out payment transaction.\n # If we are withdrawing to offchain, we need to check wormhole transactions\n wormhole_withdraw_buffer.set(GlobalStateHandler.get_withdraw_buffer()),\n receiver.chain_id.use(lambda chain_id:\n receiver.address.use(lambda address:\n If(\n chain_id.get() == Int(ALGORAND_CHAIN_ID),\n cast(Expr, submit_withdraw_onchain(address, instrument_id, amount_to_withdraw)),\n cast(Expr, submit_withdraw_offchain(wormhole_withdraw_buffer, instrument_id, amount_to_withdraw)),\n )\n )\n ),\n )" }, { "identifier": "wormhole_deposit", "path": "contracts_unified/core/methods/wormhole_deposit.py", "snippet": "@ABIReturnSubroutine\ndef wormhole_deposit(\n portal_transfer_txn: abi.ApplicationCallTransaction,\n account: AccountAddress,\n payload: DepositWord,\n instrument_id: InstrumentId,\n opup_budget: Amount,\n) -> Expr:\n \"\"\"Implements the contract method called during an ASA deposit via Wormhole.\n\n Arguments:\n\n portal_transfer_txn (ApplicationCallTransaction): The ABI \"ApplicationCallTransaction\" argument referencing the previous transaction to this call in the \"Wormhole Deposit\" group. Must be of type \"application call\".\n account (AccountAddress): Target account address to deposit to.\n payload (DepositWord): Payload, must equal to \"WormholeDeposit\" string-literal.\n instrument_id (InstrumentId): Instrument to transfer.\n opup_budget (Amount): Additional computation budget to allocate to this transaction.\n\n ----------------------------------------------------------------------------------------------------------------------------------\n\n Security rationale: The completeTransfer method of the Wormhole Token Bridge guarantees that:\n\n - The VAA was processed by the vaaVerify method of the Wormhole Core.\n - The VAA matches the completeTransfer arg.\n - The portal_transfer method exists in the group and has the proper target appId matching the Vaa.\n - The portal_transfer method has the correct sender (the server in our case)\n\n If we can ensure that the completeTransfer method exists in the group and it's from\n the canonical Wormhole Token Bridge Appid, we can transitively check remaining properties\n for additional security.\n\n Additionally, the innertxn doing the transfer actually uses the VAA information which\n we ensure is correct for the three sources: this method, the completeTransfer method and the\n vaaVerify method in the Core.\n\n ref: https://github.com/wormhole-foundation/wormhole/blob/5255e933d68629f0643207b0f9d3fa797af5cbf7/algorand/token_bridge.py#L466\n\n \"\"\"\n\n vaa = portal_transfer_txn.get().application_args[1]\n complete_transfer_txn = Gtxn[portal_transfer_txn.get().group_index() - Int(1)]\n decoded_payload = DecodedWormholePayload()\n abi_vaa = abi.make(abi.DynamicBytes)\n abi_amount = abi.Uint64()\n abi_repay_amount = abi.Uint64()\n abi_receiver = abi.Address()\n\n return Seq(\n setup(opup_budget.get()),\n\n # Ensure there are no rogue transactions past the box-budget setup\n Assert(Global.group_size() == Txn.group_index() + Int(2), 
comment=\"Unknown transactions ahead detected\"),\n\n # Ensure completeTransfer from canonical Wormhole Token Bridge exists.\n Assert(complete_transfer_txn.application_args[0] == Bytes(\"completeTransfer\"), comment=\"expected completeTransfer method call\"),\n Assert(complete_transfer_txn.application_id() == GlobalStateHandler.get_wormhole_bridge_id(), comment=\"completeTransfer call appId unknown\"),\n\n # In our current design, owner == creator, so this is valid. What we should check?\n Assert(complete_transfer_txn.sender() == GlobalStateHandler.get_operator_address(), comment=\"completeTransfer call sender unknown\"),\n\n # Ensure VAAs match\n abi_vaa.decode(vaa),\n\n # The completeTransfer code ensures his VAA equals portal_transfer VAA, we check here\n # if we match our VAA\n Assert(complete_transfer_txn.application_args[1] == abi_vaa.get(), comment=\"VAAs do not match\"),\n\n # Decode the VAA\n decoded_payload.set(cast(abi.ReturnedValue, decode_wormhole_payload(abi_vaa))),\n abi_amount.set(decoded_payload.amount),\n abi_repay_amount.set(decoded_payload.repay_amount),\n abi_receiver.set(decoded_payload.receiver),\n\n # Validate the VAA, do we need more checks?\n XAssert(\n abi_receiver.get() == account.get(),\n comment=\"Receiving user address mismatch\",\n ),\n\n # Perform deposit\n cast(Expr, inner_deposit_asset(account, payload, instrument_id, abi_amount, abi_repay_amount)),\n )" } ]
from pyteal import (
    BareCallActions,
    CallConfig,
    MethodConfig,
    OnCompleteAction,
    OptimizeOptions,
    Reject,
    Router,
)

from contracts_unified.core.bare_calls import delete, update
from contracts_unified.core.methods import (
    account_move,
    add_order,
    clean_orders,
    create,
    deposit,
    fund_mbr,
    liquidate,
    pool_move,
    portal_transfer,
    settle,
    update_instrument,
    update_parameter,
    withdraw,
    wormhole_deposit,
)
11,878
""" This file implements the router of the Core contract. """ CORE_ROUTER = Router( "C3 Core", BareCallActions( update_application=OnCompleteAction.always(update()), delete_application=OnCompleteAction.always(delete()), ), clear_state=Reject(), ) CORE_ROUTER.add_method_handler( create, "create", MethodConfig(no_op=CallConfig.CREATE), "Create C3 Core contract", ) CORE_ROUTER.add_method_handler( update_instrument, "update_instrument", MethodConfig(no_op=CallConfig.CALL), "Add a new instrument (ASA) to the Core", ) CORE_ROUTER.add_method_handler( update_parameter, "update_parameter", MethodConfig(no_op=CallConfig.CALL), "Update a global parameter", ) CORE_ROUTER.add_method_handler( deposit, "deposit", MethodConfig(no_op=CallConfig.CALL), "Deposit assets to user account", ) CORE_ROUTER.add_method_handler( wormhole_deposit, "wormhole_deposit", MethodConfig(no_op=CallConfig.CALL), "Deposit assets to user account via Wormhole", ) CORE_ROUTER.add_method_handler( pool_move, "pool_move", MethodConfig(no_op=CallConfig.CALL), "Transfer instruments between user and pool", ) CORE_ROUTER.add_method_handler( add_order, "add_order", MethodConfig(no_op=CallConfig.CALL), "Add an order to the order book", ) CORE_ROUTER.add_method_handler( settle, "settle", MethodConfig(no_op=CallConfig.CALL), "Settle two orders" ) CORE_ROUTER.add_method_handler(
""" This file implements the router of the Core contract. """ CORE_ROUTER = Router( "C3 Core", BareCallActions( update_application=OnCompleteAction.always(update()), delete_application=OnCompleteAction.always(delete()), ), clear_state=Reject(), ) CORE_ROUTER.add_method_handler( create, "create", MethodConfig(no_op=CallConfig.CREATE), "Create C3 Core contract", ) CORE_ROUTER.add_method_handler( update_instrument, "update_instrument", MethodConfig(no_op=CallConfig.CALL), "Add a new instrument (ASA) to the Core", ) CORE_ROUTER.add_method_handler( update_parameter, "update_parameter", MethodConfig(no_op=CallConfig.CALL), "Update a global parameter", ) CORE_ROUTER.add_method_handler( deposit, "deposit", MethodConfig(no_op=CallConfig.CALL), "Deposit assets to user account", ) CORE_ROUTER.add_method_handler( wormhole_deposit, "wormhole_deposit", MethodConfig(no_op=CallConfig.CALL), "Deposit assets to user account via Wormhole", ) CORE_ROUTER.add_method_handler( pool_move, "pool_move", MethodConfig(no_op=CallConfig.CALL), "Transfer instruments between user and pool", ) CORE_ROUTER.add_method_handler( add_order, "add_order", MethodConfig(no_op=CallConfig.CALL), "Add an order to the order book", ) CORE_ROUTER.add_method_handler( settle, "settle", MethodConfig(no_op=CallConfig.CALL), "Settle two orders" ) CORE_ROUTER.add_method_handler(
withdraw,
14
2023-11-17 20:54:15+00:00
16k
cyberark/ark-sdk-python
ark_sdk_python/cli_services/dpa/db/ark_dpa_db_policies_editor_service.py
[ { "identifier": "ArkInquirerRender", "path": "ark_sdk_python/args/ark_args_formatter.py", "snippet": "class ArkInquirerRender(ConsoleRender):\n # pylint: disable=keyword-arg-before-vararg,protected-access\n def __init__(self, event_generator=None, *args, **kwargs):\n super().__init__(event_generator=event_generator, theme=ARK_INQUIRER_THEME, *args, **kwargs)\n\n def render(self, question, answers=None):\n question.answers = answers or {}\n\n if question.ignore:\n return question.default\n\n clazz = self.render_factory(question.kind)\n render = clazz(question, terminal=self.terminal, theme=self._theme, show_default=question.show_default)\n if isinstance(\n render, (inquirer.render.console._text.Text, inquirer.render.console._password.Password, inquirer.render.console._path.Path)\n ):\n render.current = ''\n self.clear_eos()\n\n try:\n a = self._event_loop(render)\n if not a and question.default:\n a = question.default\n elif not a and question.name in answers:\n a = answers[question.name]\n return a\n finally:\n print('')\n\n def _print_header(self, render):\n base = render.get_header()\n\n header = base[: self.width - 9] + '...' if len(base) > self.width - 6 else base\n default_value = '{normal} ({default})'.format(default=render.question.default, normal=self.terminal.normal)\n show_default = render.question.default and render.show_default\n header += default_value if show_default else ''\n msg_template = '{t.move_up}{t.clear_eol}{tq.brackets_color}{tq.mark_color}?{tq.brackets_color} {msg}{t.normal}'\n\n escaped_current_value = str(render.get_current_value()).replace('{', '{{').replace('}', '}}')\n self.print_str(\n f'\\n{msg_template} {escaped_current_value}',\n msg=header,\n lf=not render.title_inline,\n tq=self._theme.Question,\n )" }, { "identifier": "ArkISPAuth", "path": "ark_sdk_python/auth/ark_isp_auth.py", "snippet": "class ArkISPAuth(ArkAuth):\n def __perform_identity_authentication(\n self, profile: ArkProfile, auth_profile: ArkAuthProfile, secret: Optional[ArkSecret], force: bool\n ) -> ArkToken:\n try:\n method_settings = cast(IdentityArkAuthMethodSettings, auth_profile.auth_method_settings)\n identity = ArkIdentity(\n username=auth_profile.username,\n password=secret.secret.get_secret_value() if secret else None,\n identity_url=method_settings.identity_url,\n mfa_type=method_settings.identity_mfa_method,\n logger=self._logger,\n cache_authentication=self._cache_authentication,\n )\n identity.auth_identity(profile, ArkSystemConfig.is_interactive() and method_settings.identity_mfa_interactive, force)\n env = AwsEnv(os.environ.get('DEPLOY_ENV', AwsEnv.PROD.value))\n found_env = list(filter(lambda e: ROOT_DOMAIN[e] in identity.identity_url, ROOT_DOMAIN.keys()))\n if found_env:\n env = found_env[0]\n token_lifetime = identity.session_details.token_lifetime\n if not token_lifetime:\n token_lifetime = DEFAULT_TOKEN_LIFETIME\n return ArkToken(\n token=identity.session_token,\n username=auth_profile.username,\n endpoint=identity.identity_url,\n token_type=ArkTokenType.JWT,\n auth_method=ArkAuthMethod.Identity,\n expires_in=datetime.now() + timedelta(seconds=token_lifetime),\n refresh_token=identity.session_details.refresh_token,\n metadata={'env': env, 'cookies': codecs.encode(pickle.dumps(identity.session.cookies), 'base64').decode()},\n )\n except Exception as ex:\n self._logger.exception(f'Failed to authenticate to identity security platform [{str(ex)}]')\n raise ArkAuthException from ex\n\n def __perform_identity_refresh_authentication(self, profile: ArkProfile, auth_profile: 
ArkAuthProfile, token: ArkToken) -> ArkToken:\n try:\n method_settings = cast(IdentityArkAuthMethodSettings, auth_profile.auth_method_settings)\n identity = ArkIdentity(\n username=auth_profile.username,\n password=None,\n identity_url=method_settings.identity_url,\n mfa_type=method_settings.identity_mfa_method,\n logger=self._logger,\n cache_authentication=self._cache_authentication,\n load_cache=True,\n cache_profile=profile,\n )\n identity.refresh_auth_identity(profile, method_settings.identity_mfa_interactive, False)\n env = AwsEnv(os.environ.get('DEPLOY_ENV', AwsEnv.PROD.value))\n found_env = list(filter(lambda e: ROOT_DOMAIN[e] in identity.identity_url, ROOT_DOMAIN.keys()))\n if found_env:\n env = found_env[0]\n token_lifetime = identity.session_details.token_lifetime\n if not token_lifetime:\n token_lifetime = DEFAULT_TOKEN_LIFETIME\n return ArkToken(\n token=identity.session_token,\n username=auth_profile.username,\n endpoint=identity.identity_url,\n token_type=ArkTokenType.JWT,\n auth_method=ArkAuthMethod.Identity,\n expires_in=datetime.now() + timedelta(seconds=token_lifetime),\n refresh_token=identity.session_details.refresh_token,\n metadata={'env': env, 'cookies': codecs.encode(pickle.dumps(identity.session.cookies), 'base64').decode()},\n )\n except Exception as ex:\n raise ArkAuthException('Failed to authenticate to isp via identity') from ex\n\n def __perform_identity_service_user_authentication(\n self, profile: ArkProfile, auth_profile: ArkAuthProfile, secret: Optional[ArkSecret], force: bool\n ) -> ArkToken:\n try:\n if not secret:\n raise ArkException('Token secret is required for identity service user auth')\n method_settings = cast(IdentityServiceUserArkAuthMethodSettings, auth_profile.auth_method_settings)\n identity = ArkIdentityServiceUser(\n username=auth_profile.username,\n token=secret.secret.get_secret_value(),\n app_name=method_settings.identity_authorization_application,\n logger=self._logger,\n cache_authentication=self._cache_authentication,\n )\n identity.auth_identity(profile, force)\n env = AwsEnv(os.environ.get('DEPLOY_ENV', AwsEnv.PROD.value))\n found_env = list(filter(lambda e: ROOT_DOMAIN[e] in identity.identity_url, ROOT_DOMAIN.keys()))\n if found_env:\n env = found_env[0]\n return ArkToken(\n token=identity.session_token,\n username=auth_profile.username,\n endpoint=identity.identity_url,\n token_type=ArkTokenType.JWT,\n auth_method=ArkAuthMethod.IdentityServiceUser,\n expires_in=datetime.now() + timedelta(hours=4),\n metadata={'env': env, 'cookies': codecs.encode(pickle.dumps(identity.session.cookies), 'base64').decode()},\n )\n except Exception as ex:\n self._logger.exception(f'Failed to authenticate to identity security platform with service user [{str(ex)}]')\n raise ArkAuthException from ex\n\n @overrides\n def _perform_authentication(\n self, profile: ArkProfile, auth_profile: ArkAuthProfile, secret: Optional[ArkSecret] = None, force: bool = False\n ) -> ArkToken:\n \"\"\"\n Performs authentication to the identity security platform identity tenant\n Authentication can be done with either a service user or a normal user\n Authentication Methods:\n - Identity, Default\n - IdentityServiceUser\n\n Args:\n profile (ArkProfile): _description_\n auth_profile (ArkAuthProfile): _description_\n secret (Optional[ArkSecret], optional): _description_. Defaults to None.\n force (bool, optional): _description_. 
Defaults to False.\n\n Raises:\n ArkAuthException: _description_\n\n Returns:\n ArkToken: _description_\n \"\"\"\n self._logger.info('Performing authentication to ISP')\n if auth_profile.auth_method in [ArkAuthMethod.Identity, ArkAuthMethod.Default]:\n return self.__perform_identity_authentication(profile, auth_profile, secret, force)\n if auth_profile.auth_method == ArkAuthMethod.IdentityServiceUser:\n return self.__perform_identity_service_user_authentication(profile, auth_profile, secret, force)\n raise ArkAuthException('Given auth method is not supported')\n\n @overrides\n def _perform_refresh_authentication(self, profile: ArkProfile, auth_profile: ArkAuthProfile, token: ArkToken) -> ArkToken:\n \"\"\"\n Refresh for isp tenant is supported only for identity\n\n Args:\n profile (ArkProfile): _description_\n auth_profile (ArkAuthProfile): _description_\n token (ArkToken): _description_\n\n Returns:\n ArkToken: _description_\n \"\"\"\n self._logger.info('Performing refresh authentication to ISP')\n if auth_profile.auth_method in [ArkAuthMethod.Identity, ArkAuthMethod.Default]:\n return self.__perform_identity_refresh_authentication(profile, auth_profile, token)\n return token\n\n @staticmethod\n @overrides\n def authenticator_name() -> str:\n return AUTH_NAME\n\n @staticmethod\n @overrides\n def authenticator_human_readable_name() -> str:\n return AUTH_HUMAN_READABLE_NAME\n\n @staticmethod\n @overrides\n def supported_auth_methods() -> List[ArkAuthMethod]:\n return AUTH_METHODS\n\n @staticmethod\n @overrides\n def default_auth_method() -> Tuple[ArkAuthMethod, ArkAuthMethodSettings]:\n return DEFAULT_AUTH_METHOD, DEFAULT_AUTH_METHOD_SETTINGS" }, { "identifier": "ArkDPABasePoliciesEditorService", "path": "ark_sdk_python/cli_services/dpa/common/ark_dpa_base_policies_editor_service.py", "snippet": "class ArkDPABasePoliciesEditorService(\n ArkService, ABC, Generic[PolicyType, PolicyListItemType, AddPolicyType, UpdatePolicyType, GeneratePolicyType]\n):\n def __init__(\n self,\n policy_type: PolicyType,\n add_policy_type: AddPolicyType,\n update_policy_type: UpdatePolicyType,\n isp_auth: ArkISPAuth,\n policies_family: str,\n tenant_id: str,\n policies_cache_dir: Optional[str] = None,\n profile: Optional[ArkProfile] = None,\n ) -> None:\n super().__init__(isp_auth)\n profile = profile or ArkProfileLoader.load_default_profile()\n self._policies_family = policies_family\n self.__policies_cache_dir = Path(policies_cache_dir or Path.home() / '.ark_cache' / 'profiles' / profile.profile_name / tenant_id)\n if not policies_cache_dir and 'ARK_DPA_POLICIES_EDITOR_FOLDER' in os.environ:\n self.__policies_cache_dir = Path(os.environ['ARK_DPA_POLICIES_EDITOR_FOLDER'])\n self.__policies_cache_dir = self.__policies_cache_dir / policies_family\n self.__policies_cache_dir.mkdir(exist_ok=True, parents=True)\n self.__policy_type = policy_type\n self.__add_policy_type = add_policy_type\n self.__update_policy_type = update_policy_type\n\n @abstractmethod\n def _policy(self, get_policy: ArkDPAGetPolicy) -> PolicyType:\n pass\n\n @abstractmethod\n def _list_policies(self) -> List[PolicyListItemType]:\n pass\n\n @abstractmethod\n def _add_policy(self, add_policy: AddPolicyType) -> PolicyType:\n pass\n\n @abstractmethod\n def _update_policy(self, update_policy: UpdatePolicyType) -> PolicyType:\n pass\n\n @abstractmethod\n def _delete_policy(self, delete_policy: ArkDPADeletePolicy) -> None:\n pass\n\n @abstractmethod\n def _generate_policy(self, generate_policy: GeneratePolicyType, workspace_policies: List[PolicyType]) 
-> PolicyType:\n pass\n\n def __load_policy_diff(self, workspace_policy: PolicyType) -> Optional[Tuple[PolicyType, PolicyType]]:\n remote_policy = self._policy(ArkDPAGetPolicy(policy_id=str(workspace_policy.policy_id)))\n if remote_policy != workspace_policy:\n return (workspace_policy, remote_policy)\n return None\n\n def __load_policies_diff(self) -> Dict[str, Tuple[PolicyType, PolicyType]]:\n workspace_policies = self.__load_existing_policies_from_workspace()\n with ThreadPoolExecutor() as executor:\n remote_policies = {\n p[0].policy_name: p for p in executor.map(self.__load_policy_diff, workspace_policies.values()) if p is not None\n }\n return remote_policies\n\n def __load_policies_from_workspace_by_suffix(self, suffix: str = '') -> Dict[str, PolicyType]:\n p = Path(self.__policies_cache_dir).glob(f'*.json{suffix}')\n policies_files = [x for x in p if x.is_file() and x.suffix == suffix or '.json']\n policies = {}\n for f in policies_files:\n policy = self.__policy_type.parse_file(f)\n policies[policy.policy_name] = policy\n return policies\n\n def __load_removed_policies_from_workspace(self) -> Dict[str, PolicyType]:\n return self.__load_policies_from_workspace_by_suffix('.removed')\n\n def __load_generated_policies_from_workspace(self) -> Dict[str, PolicyType]:\n return self.__load_policies_from_workspace_by_suffix('.generated')\n\n def __load_existing_policies_from_workspace(self) -> Dict[str, PolicyType]:\n return self.__load_policies_from_workspace_by_suffix()\n\n def __load_policy_to_workspace(self, policy: PolicyListItemType, override: bool) -> Optional[PolicyType]:\n policy_data = self._policy(ArkDPAGetPolicy(policy_id=policy.policy_id))\n policy_path = Path(self.__policies_cache_dir) / (policy_data.policy_name + '.json')\n if policy_path.exists():\n existing_data = self.__policy_type.parse_raw(policy_path.read_text())\n if existing_data != policy_data:\n if not override:\n return policy_data\n if not policy_data.policy_id:\n policy_data.policy_id = policy.policy_id\n policy_path.write_text(policy_data.json(indent=4))\n (Path(self.__policies_cache_dir) / (policy_data.policy_name + '.json.removed')).unlink(missing_ok=True)\n\n def load_policies(self, load_policies: ArkDPALoadPolicies) -> ArkDPALoadedPolicies:\n \"\"\"\n Loads all remote policies into the local workspace.\n The user is asked whether to overwrite existing policies that were edited either locally or remotely.\n When default overwrite is enabled, existing policies are overwritten without prompts.\n\n Args:\n load_policies (ArkDPALoadPolicies): _description_\n\n Returns:\n ArkDPALoadedPolicies: _description_\n \"\"\"\n policies = self._list_policies()\n policies_to_query: Dict[str, PolicyType] = []\n with ThreadPoolExecutor() as executor:\n policies_to_query = {\n p.policy_name: p\n for p in executor.map(lambda p: self.__load_policy_to_workspace(p, load_policies.override), policies)\n if p is not None\n }\n # Build the query editor to ask the user\n policies_to_override = []\n if policies_to_query:\n answers = inquirer.prompt(\n [\n inquirer.Checkbox(\n 'override',\n message=f'Conflicts detected, please choose if you wish to override local {self._policies_family} policies or leave them as is',\n choices=[p.policy_name for p in policies_to_query.values()],\n )\n ],\n render=ArkInquirerRender(),\n )\n if not answers:\n return\n policies_to_override = answers['override']\n for policy_name in policies_to_override:\n policy_path = Path(self.__policies_cache_dir) / (policy_name + '.json')\n if policy_path.exists() and 
policy_name in policies_to_query:\n policy_path.write_text(policies_to_query[policy_name].json(indent=4))\n return ArkDPALoadedPolicies(\n loaded_path=str(self.__policies_cache_dir),\n overall_policies_count=len(policies),\n loaded_policies_count=len(policies) - len(policies_to_query),\n overriden_policies_count=len(policies_to_override),\n untouched_policies_count=len(policies_to_query) - len(policies_to_override),\n )\n\n def edit_policies(self, edit_policies: ArkDPAEditPolicies) -> None:\n \"\"\"\n Edits the set of specified policies one at a time, either via the CLI or the default OS editor.\n Edited policies are only saved locally until they are committed.\n\n Args:\n edit_policies (ArkDPAEditPolicies): _description_\n\n Raises:\n ArkServiceException: _description_\n \"\"\"\n workspace_policies = self.__load_existing_policies_from_workspace()\n workspace_policies.update(self.__load_generated_policies_from_workspace())\n if not workspace_policies:\n raise ArkServiceException(\n f'No {self._policies_family} policies to edit in the workspace, please load the policies or generate a new one'\n )\n policy_names = edit_policies.names\n if not policy_names:\n answers = inquirer.prompt(\n [\n inquirer.Checkbox(\n 'names',\n f'Which {self._policies_family} policies would you like to edit?, press space to select',\n choices=[p.policy_name for p in workspace_policies.values()],\n )\n ],\n render=ArkInquirerRender(),\n )\n if not answers:\n return\n policy_names = answers['names']\n try:\n answers = inquirer.prompt(\n [\n inquirer.Editor(f'{name}_edit', message=f'Chosen {self._policies_family} policy [{name}] is about to be edited')\n for name in policy_names\n ],\n render=ArkInquirerRender(),\n answers={f'{name}_edit': workspace_policies[name].json(indent=4) for name in policy_names},\n )\n for name in policy_names:\n policy = self.__policy_type.parse_raw(answers[f'{name}_edit'])\n for path in [\n Path(self.__policies_cache_dir) / (name + '.json'),\n Path(self.__policies_cache_dir) / (name + '.json.generated'),\n ]:\n if path.exists():\n path.write_text(policy.json(indent=4))\n break\n except EditorError as ex:\n self._logger.error(\n f'An error occurred while trying to edit {self._policies_family} policies, '\n f'you can edit the policies at [{self.__policies_cache_dir}] [{str(ex)}]'\n )\n\n def remove_policies(self, remove_policies: ArkDPARemovePolicies) -> None:\n \"\"\"\n Removes one or more policies from the local workspace.\n Until changes are committed, removing a remote policy only appends the `.deleted` indication to its name.\n After committing the changes, the policies are deleted both locally and remotely.\n New, uncommitted policies are deleted locally after the user consents.\n\n Args:\n remove_policies (ArkDPARemovePolicies): _description_\n\n Raises:\n ArkServiceException: _description_\n \"\"\"\n workspace_policies = self.__load_existing_policies_from_workspace()\n workspace_policies.update(self.__load_generated_policies_from_workspace())\n if not workspace_policies:\n raise ArkServiceException(\n f'No {self._policies_family} policies to remove in the workspace, please load the policies or generate a new one'\n )\n policy_names = remove_policies.names\n if not policy_names:\n answers = inquirer.prompt(\n [\n inquirer.Checkbox(\n 'names',\n f'Which {self._policies_family} policies would you like to remove?, press space to select',\n choices=[p.policy_name for p in workspace_policies.values()],\n )\n ],\n render=ArkInquirerRender(),\n )\n if not answers:\n return\n policy_names = 
answers['names']\n for policy_name in policy_names:\n for path in [\n Path(self.__policies_cache_dir) / (policy_name + '.json'),\n Path(self.__policies_cache_dir) / (policy_name + '.json.generated'),\n ]:\n if path.exists():\n if path.suffix == '.json':\n path.rename(Path(self.__policies_cache_dir) / (policy_name + '.json.removed'))\n else:\n answers = inquirer.prompt(\n [\n inquirer.Confirm(\n 'remove',\n message=f'Are you sure you want to remove local {self._policies_family} policy [{policy_name}]?, removing an uncommitted local policy cannot be reverted',\n )\n ],\n render=ArkInquirerRender(),\n )\n if not answers:\n return\n if answers['remove']:\n path.unlink(missing_ok=True)\n\n def view_policies(self, view_policies: ArkDPAViewPolicies) -> None:\n \"\"\"\n Allows the user to view one or more policies either together or individually, as defined in the CLI user prompt.\n Policies are viewed in the machine's default editor (both existing policies and newly generated policies).\n\n Args:\n view_policies (ArkDPAViewPolicies): _description_\n \"\"\"\n workspace_policies = self.__load_existing_policies_from_workspace()\n workspace_policies.update(self.__load_generated_policies_from_workspace())\n policy_names = view_policies.names\n if not policy_names:\n answers = inquirer.prompt(\n [\n inquirer.Checkbox(\n 'names',\n f'Which {self._policies_family} policies would you like to view?',\n choices=[p.policy_name for p in workspace_policies.values()],\n )\n ],\n render=ArkInquirerRender(),\n )\n if not answers:\n return\n policy_names = answers['names']\n if not policy_names:\n return\n try:\n if view_policies.unified:\n inquirer.prompt(\n [inquirer.Editor('views', f'Show all selected {self._policies_family} policies')],\n answers={\n 'views': '\\n\\n\\n'.join(\n [f'# Policy [{policy_name}]\\n{workspace_policies[policy_name].json(indent=4)}' for policy_name in policy_names]\n )\n },\n render=ArkInquirerRender(),\n )\n else:\n inquirer.prompt(\n [inquirer.Editor(f'{policy_name}_view', f'Show [{policy_name}]') for policy_name in policy_names],\n render=ArkInquirerRender(),\n answers={f'{policy_name}_view': workspace_policies[policy_name].json(indent=4) for policy_name in policy_names},\n )\n except EditorError as ex:\n self._logger.error(\n f'An error occurred while trying to view the {self._policies_family} policies, '\n f'you can view the policies at [{self.__policies_cache_dir}] [{str(ex)}]'\n )\n\n def reset_policies(self, reset_policy: ArkDPAResetPolicies) -> None:\n \"\"\"\n Resets local workspace policies.\n When all policies are reset, all local policies are overwritten and deleted policies are removed.\n Otherwise, the user can select which policies are reset.\n This function does not alter newly generated uncommitted policies.\n\n Args:\n reset_policy (ArkDPAResetPolicies): _description_\n \"\"\"\n if reset_policy.all:\n answers = inquirer.prompt(\n [inquirer.Confirm('reset', message=f'Are you sure you want to reset all edited {self._policies_family} policies?')]\n )\n if not answers:\n return\n if answers['reset']:\n self.load_policies(ArkDPALoadPolicies(override=True))\n else:\n policies_diff = self.__load_policies_diff()\n removed_policies = self.__load_removed_policies_from_workspace()\n if not policies_diff and not removed_policies:\n return\n policy_names = reset_policy.names\n if not policy_names:\n answers = inquirer.prompt(\n [\n inquirer.Checkbox(\n 'names',\n f'Which {self._policies_family} policies would you like to reset?, press space to select',\n choices=[p for p in 
policies_diff.keys() + removed_policies.keys()],\n )\n ],\n render=ArkInquirerRender(),\n )\n if not answers:\n return\n policy_names = answers['names']\n policy_names = [p for p in policy_names if p in policies_diff or p in removed_policies]\n for policy_name in policy_names:\n policy_path = Path(self.__policies_cache_dir) / (policy_name + '.json')\n if policy_name in policies_diff:\n policy_path.write_text(policies_diff[policy_name][1].json(indent=4))\n elif policy_name in removed_policies:\n policy_path.write_text(removed_policies[policy_name].json(indent=4))\n (Path(self.__policies_cache_dir) / (policy_name + '.json.removed')).unlink(missing_ok=True)\n\n def generate_policy(self, generate_policy: GeneratePolicyType) -> None:\n \"\"\"\n Generates a new policy from a template and the user's parameters.\n The user is prompted for the parameters when they are not specified in the CLI.\n After policy's parameters are defined, the policy is generates in memory and can bee edited.\n The new policy is saved locally until it is committed.\n\n Args:\n generate_policy (GeneratePolicyType): _description_\n \"\"\"\n workspace_policies = self.__load_existing_policies_from_workspace()\n workspace_policies.update(self.__load_generated_policies_from_workspace())\n policy = self._generate_policy(generate_policy, workspace_policies)\n policy_path = Path(self.__policies_cache_dir) / (policy.policy_name + '.json.generated')\n # Let the user edit the generated policy\n if not generate_policy.disable_edit:\n try:\n answers = inquirer.prompt(\n [\n inquirer.Editor(\n 'policy_editor',\n f'Newly {self._policies_family} policy is generated and ready to be edited, once edited, it will be saved to the local workspace',\n )\n ],\n render=ArkInquirerRender(),\n answers={'policy_editor': policy.json(indent=4, exclude_none=True)},\n )\n if not answers:\n return\n policy = self.__policy_type.parse_raw(answers['policy_editor'])\n except EditorError as ex:\n self._logger.error(\n f'An error occurred while trying to edit the {self._policies_family} policy, '\n f'the policy will be saved to [{policy_path}] and can be edited manually [{str(ex)}]'\n )\n policy_path.write_text(policy.json(indent=4))\n\n def policies_diff(self, policies_diff: ArkDPAPoliciesDiff) -> None:\n \"\"\"\n Calculates the diff between the local workspace and remote policies.\n This diff includes uncommitted removed policies. 
A unified or per policy diff can be displayed.\n\n Args:\n policies_diff (ArkDPAPoliciesDiff): _description_\n \"\"\"\n loaded_policies_diff = self.__load_policies_diff()\n removed_policies = self.__load_removed_policies_from_workspace()\n if not loaded_policies_diff and not removed_policies:\n return\n if policies_diff.names:\n loaded_policies_diff = {k: v for k, v in loaded_policies_diff.items() if k in policies_diff.names}\n removed_policies = {k: v for k, v in removed_policies.items() if k in policies_diff.names}\n if not loaded_policies_diff and not removed_policies:\n return\n diffs = {\n policy_name: difflib.unified_diff(\n policy_tuple[1].json(indent=4).splitlines(True),\n policy_tuple[0].json(indent=4).splitlines(True),\n fromfile=f'local policy [{policy_name}]',\n tofile=f'remote policy [{policy_name}]',\n n=MAX_LINE_DIFF,\n )\n for policy_name, policy_tuple in loaded_policies_diff.items()\n }\n diffs.update(\n {\n policy_name: difflib.unified_diff(\n policy.json(indent=4).splitlines(True),\n '',\n fromfile=f'local policy [{policy_name}]',\n tofile=f'remote policy [{policy_name}]',\n n=MAX_LINE_DIFF,\n )\n for policy_name, policy in removed_policies.items()\n }\n )\n try:\n if policies_diff.unified:\n inquirer.prompt(\n [inquirer.Editor('diffs', 'Show all diffs')],\n render=ArkInquirerRender(),\n answers={'diffs': '\\n\\n\\n'.join([''.join(d) for d in diffs.values()])},\n )\n else:\n inquirer.prompt(\n [inquirer.Editor(f'{policy_name}_diff', f'Show [{policy_name}] diff') for policy_name in diffs.keys()],\n render=ArkInquirerRender(),\n answers={f'{policy_name}_diff': ''.join(policy_diffs) for policy_name, policy_diffs in diffs.items()},\n )\n except EditorError as ex:\n self._logger.error(\n f'An error occurred while trying to show {self._policies_family} policies diff, '\n f'you can view the policies at [{self.__policies_cache_dir}] [{str(ex)}]'\n )\n\n def policies_status(self, get_policies_status: ArkDPAGetPoliciesStatus) -> ArkDPAPoliciesStatus:\n \"\"\"\n Gets the status of locally altered policies.\n\n Args:\n get_policies_status (ArkDPAGetPoliciesStatus): _description_\n\n Returns:\n ArkDPAPoliciesStatus: _description_\n \"\"\"\n loaded_policies_diff = self.__load_policies_diff()\n removed_policies = self.__load_removed_policies_from_workspace()\n generated_policies = self.__load_generated_policies_from_workspace()\n if get_policies_status.names:\n loaded_policies_diff = {k: v for k, v in loaded_policies_diff.items() if k in get_policies_status.names}\n removed_policies = {k: v for k, v in removed_policies.items() if k in get_policies_status.names}\n generated_policies = {k: v for k, v in generated_policies.items() if k in get_policies_status.names}\n return ArkDPAPoliciesStatus(\n modified_policies=list(loaded_policies_diff.keys()),\n removed_policies=list(removed_policies.keys()),\n added_policies=list(generated_policies.keys()),\n )\n\n def commit_policies(self, commit_policies: ArkDPACommitPolicies) -> None:\n \"\"\"\n Commits policies.\n The function first calculates the differences between the local and remote policies to find out which policies were edited, including\n the policies selected for deletion and new, uncommitted policies. 
It also\n allows selecting whether to commit all the edited policies or only specific policies by name.\n\n After all policies are committed, the workspace is reorganized accordingly.\n\n Args:\n commit_policies (ArkDPACommitPolicies): _description_\n \"\"\"\n loaded_policies_diff = self.__load_policies_diff()\n removed_policies = self.__load_removed_policies_from_workspace()\n generated_policies = self.__load_generated_policies_from_workspace()\n if not loaded_policies_diff and not removed_policies and not generated_policies:\n return\n if commit_policies.all:\n answers = inquirer.prompt(\n [inquirer.Confirm('reset', message=f'Are you sure you want to commit all edited {self._policies_family} policies?')]\n )\n if not answers or not answers['reset']:\n return\n else:\n if commit_policies.names:\n loaded_policies_diff = {k: v for k, v in loaded_policies_diff.items() if k in commit_policies.names}\n removed_policies = {k: v for k, v in removed_policies.items() if k in commit_policies.names}\n generated_policies = {k: v for k, v in generated_policies.items() if k in commit_policies.names}\n else:\n answers = inquirer.prompt(\n [\n inquirer.Checkbox(\n 'names',\n f'Which {self._policies_family} policies would you like to commit?, press space to select',\n choices=list(loaded_policies_diff.keys()) + list(removed_policies.keys()) + list(generated_policies.keys()),\n )\n ],\n render=ArkInquirerRender(),\n )\n if not answers:\n return\n loaded_policies_diff = {k: v for k, v in loaded_policies_diff.items() if k in answers['names']}\n removed_policies = {k: v for k, v in removed_policies.items() if k in answers['names']}\n generated_policies = {k: v for k, v in generated_policies.items() if k in answers['names']}\n if not loaded_policies_diff and not removed_policies and not generated_policies:\n return\n with ThreadPoolExecutor() as executor:\n added = executor.map(lambda p: self._add_policy(self.__add_policy_type(**p.dict())), generated_policies.values())\n updated = executor.map(lambda p: self._update_policy(self.__update_policy_type(**p[0].dict())), loaded_policies_diff.values())\n deleted = executor.map(\n lambda p: self._delete_policy(ArkDPADeletePolicy(policy_id=p.policy_id, policy_name=p.policy_name)),\n removed_policies.values(),\n )\n # Loop for exception checking\n added_policies = list(added)\n for _ in itertools.chain(updated, deleted):\n pass\n for policy_name in removed_policies.keys():\n (Path(self.__policies_cache_dir) / (policy_name + '.json.removed')).unlink(missing_ok=True)\n for policy_name in generated_policies.keys():\n for policy in added_policies:\n if policy.policy_name == policy_name:\n (Path(self.__policies_cache_dir) / (policy_name + '.json.generated')).rename(\n (Path(self.__policies_cache_dir) / (policy_name + '.json'))\n )\n (Path(self.__policies_cache_dir) / (policy_name + '.json')).write_text(policy.json(indent=4))" }, { "identifier": "ArkProfile", "path": "ark_sdk_python/models/ark_profile.py", "snippet": "class ArkProfile(ArkModel):\n profile_name: str = Field(default='ark', alias='Profile Name', description='Profile name for storage')\n profile_description: str = Field(default='Default Ark Profile', alias='Profile Description', description='Info about the profile')\n auth_profiles: Dict[str, ArkAuthProfile] = Field(\n description='Authentication profiles configurations, map from name of the authenticator to its profile', default_factory=dict\n )\n\n # pylint: disable=no-self-use,no-self-argument\n @validator('auth_profiles', pre=True)\n def 
validate_auth_profiles(cls, val):\n auth_profiles = {}\n for k, v in val.items():\n auth_profile = ArkAuthProfile.parse_obj(v)\n # Make sure that the settings are parsed with the correct class\n # Due to properties overlapping\n if 'auth_method_settings' in v:\n auth_profile.auth_method_settings = ArkAuthMethodSettingsMap[auth_profile.auth_method].parse_obj(v['auth_method_settings'])\n auth_profiles[k] = auth_profile\n return auth_profiles" }, { "identifier": "ArkDPADBGeneratePolicy", "path": "ark_sdk_python/models/cli_services/dpa/policies_editor/db/ark_dpa_db_generate_policy.py", "snippet": "class ArkDPADBGeneratePolicy(ArkDPABaseGeneratePolicy):\n providers: Optional[Set[Literal['MySQL', 'MariaDB', 'Postgres', 'MSSQL', 'Oracle']]] = Field(\n description='Providers to generate the policy for'\n )" }, { "identifier": "ArkWorkspaceType", "path": "ark_sdk_python/models/common/ark_workspace_type.py", "snippet": "class ArkWorkspaceType(str, MultiValueEnum):\n AWS = 'aws', 'AWS', 'Aws'\n AZURE = 'azure', 'AZURE', 'Azure'\n ONPREM = 'onprem', 'ON-PREMISE', 'OnPrem'\n DB = 'db', 'DATABASES', 'Databases'\n GCP = 'gcp', 'GCP'\n MYSQL = 'mysql', 'MySQL'\n MARIADB = 'mariadb', 'MariaDB'\n MSSQL = 'mssql', 'MSSQL'\n ORACLE = 'oracle', 'Oracle'\n POSTGRES = 'postgres', 'Postgres'\n FAULT = 'fault', 'FAULT'\n UNKNOWN = 'unknown', 'UNKNOWN', 'Unknown'" }, { "identifier": "ArkServiceConfig", "path": "ark_sdk_python/models/services/ark_service_config.py", "snippet": "class ArkServiceConfig(ArkModel):\n service_name: str = Field(description='Name of the service')\n required_authenticator_names: List[str] = Field(description='Required authenticators for the service to properly work')\n optional_authenticator_names: List[str] = Field(\n description='Optional authenticators for the service for extra capabilities', default_factory=list\n )" }, { "identifier": "ArkDPADeletePolicy", "path": "ark_sdk_python/models/services/dpa/policies/common/ark_dpa_delete_policy.py", "snippet": "class ArkDPADeletePolicy(ArkModel):\n policy_id: Optional[str] = Field(description='Policy id to delete')\n policy_name: Optional[str] = Field(description='Policy name to delete')\n\n # pylint: disable=no-self-use,no-self-argument\n @root_validator\n def validate_either(cls, values):\n if 'policy_id' not in values and 'policy_name' not in values:\n raise ValueError('Either policy id or policy name needs to be provided')\n return values" }, { "identifier": "ArkDPAGetPolicy", "path": "ark_sdk_python/models/services/dpa/policies/common/ark_dpa_get_policy.py", "snippet": "class ArkDPAGetPolicy(ArkModel):\n policy_id: Optional[str] = Field(description='Policy id to get')\n policy_name: Optional[str] = Field(description='Policy name to get')\n\n # pylint: disable=no-self-use,no-self-argument\n @root_validator\n def validate_either(cls, values):\n if 'policy_id' not in values and 'policy_name' not in values:\n raise ValueError('Either policy id or policy name needs to be provided')\n return values" }, { "identifier": "ArkDPARuleStatus", "path": "ark_sdk_python/models/services/dpa/policies/common/ark_dpa_rule_status.py", "snippet": "class ArkDPARuleStatus(str, Enum):\n Enabled = 'Enabled'\n Disabled = 'Disabled'\n Draft = 'Draft'\n Expired = 'Expired'" }, { "identifier": "ArkDPAUserData", "path": "ark_sdk_python/models/services/dpa/policies/common/ark_dpa_user_data.py", "snippet": "class ArkDPAUserData(ArkCamelizedModel):\n roles: Optional[List[Union[str, ArkDPAUserDataAttribute]]] = Field(description='Roles allowed for auth rule', 
default_factory=list)\n groups: Optional[List[Union[str, ArkDPAUserDataAttribute]]] = Field(description='Groups allowed for auth rule', default_factory=list)\n users: Optional[List[Union[str, ArkDPAUserDataAttribute]]] = Field(description='Users allowed for auth rule', default_factory=list)" }, { "identifier": "ArkDPADBAddPolicy", "path": "ark_sdk_python/models/services/dpa/policies/db/ark_dpa_db_add_policy.py", "snippet": "class ArkDPADBAddPolicy(ArkDPABaseAddPolicy):\n providers_tags: List[str] = Field(description='Policy tags to use as filters for the assets in the rules', default_factory=list)\n providers_data: Optional[ArkDPADBProvidersData] = Field(\n description='Policy providers data containing database assets of different types'\n )\n user_access_rules: Optional[List[ArkDPADBAuthorizationRule]] = Field(\n description='Authorization rules of the policy describing how and who can access the assets'\n )" }, { "identifier": "ArkDPADBAuthorizationRule", "path": "ark_sdk_python/models/services/dpa/policies/db/ark_dpa_db_authorization_rule.py", "snippet": "class ArkDPADBAuthorizationRule(ArkDPABaseAuthorizationRule):\n connection_information: ArkDPADBConnectionInformation = Field(description='Rule information on how access is made')" }, { "identifier": "ArkDPADBConnectionInformation", "path": "ark_sdk_python/models/services/dpa/policies/db/ark_dpa_db_authorization_rule.py", "snippet": "class ArkDPADBConnectionInformation(ArkDPABaseConnectionInformation):\n connect_as: ArkDPADBConnectAs = Field(description='In which fashion the connection is made')" }, { "identifier": "ArkDPADBAppliedTo", "path": "ark_sdk_python/models/services/dpa/policies/db/ark_dpa_db_connection_data.py", "snippet": "class ArkDPADBAppliedTo(ArkCamelizedModel):\n name: str = Field(description='Name of the resource to apply the auth to')\n type: ArkDPADBResourceIdentifierType = Field(description='Type of the resource')" }, { "identifier": "ArkDPADBBaseAuth", "path": "ark_sdk_python/models/services/dpa/policies/db/ark_dpa_db_connection_data.py", "snippet": "class ArkDPADBBaseAuth(ArkCamelizedModel):\n pass" }, { "identifier": "ArkDPADBConnectAs", "path": "ark_sdk_python/models/services/dpa/policies/db/ark_dpa_db_connection_data.py", "snippet": "class ArkDPADBConnectAs(ArkCamelizedModel):\n ldap_auth: Optional[Union[ArkDPADBLDAPAuth, List[ArkDPADBLDAPAuth]]] = Field(\n description='LDAP related authentication, only applies to MSSQL DB'\n )\n db_auth: Optional[Union[ArkDPADBLocalDBAuth, List[ArkDPADBLocalDBAuth]]] = Field(\n description='Local DB related authentication, only applies to MySQL / MariaDB / Postgres'\n )\n oracle_auth: Optional[Union[ArkDPADBOracleDBAuth, List[ArkDPADBOracleDBAuth]]] = Field(\n description='Oracle DB related authentication, only applies to Oracle'\n )" }, { "identifier": "ArkDPADBLDAPAuth", "path": "ark_sdk_python/models/services/dpa/policies/db/ark_dpa_db_connection_data.py", "snippet": "class ArkDPADBLDAPAuth(ArkDPADBBaseAuth):\n assign_groups: List[str] = Field(description='LDAP groups to assign the ephemeral user to')\n applied_to: Optional[List[ArkDPADBAppliedTo]] = Field(description='Which resources to apply to')" }, { "identifier": "ArkDPADBLocalDBAuth", "path": "ark_sdk_python/models/services/dpa/policies/db/ark_dpa_db_connection_data.py", "snippet": "class ArkDPADBLocalDBAuth(ArkDPADBBaseAuth):\n roles: List[str] = Field(description='Local DB roles to assign the ephemeral user to')\n applied_to: Optional[List[ArkDPADBAppliedTo]] = Field(description='Which resources to apply to')" }, { 
"identifier": "ArkDPADBOracleDBAuth", "path": "ark_sdk_python/models/services/dpa/policies/db/ark_dpa_db_connection_data.py", "snippet": "class ArkDPADBOracleDBAuth(ArkDPADBBaseAuth):\n roles: List[str] = Field(description='Local DB roles to assign the ephemeral user to')\n applied_to: Optional[List[ArkDPADBAppliedTo]] = Field(description='Which resources to apply to')\n dba_role: bool = Field(description='Whether to apply to the ephemeral user the DBA role', default=False)\n sysdba_role: bool = Field(description='Whether to apply to the ephemeral user the SYSDBA role', default=False)\n sysoper_role: bool = Field(description='Whether to apply to the ephemeral user the SYSOPER role', default=False)" }, { "identifier": "ArkDPADBResourceIdentifierType", "path": "ark_sdk_python/models/services/dpa/policies/db/ark_dpa_db_connection_data.py", "snippet": "class ArkDPADBResourceIdentifierType(str, Enum):\n RESOURCE = 'resource'\n TAG = 'tag'" }, { "identifier": "ArkDPADBPolicy", "path": "ark_sdk_python/models/services/dpa/policies/db/ark_dpa_db_policy.py", "snippet": "class ArkDPADBPolicy(ArkDPABasePolicy):\n providers_tags: List[str] = Field(description='Policy tags', default_factory=list)\n providers_data: ArkDPADBProvidersData = Field(description='Policy providers data')\n user_access_rules: Optional[List[ArkDPADBAuthorizationRule]] = Field(description='Authorization rules of the policy')" }, { "identifier": "ArkDPADBPolicyListItem", "path": "ark_sdk_python/models/services/dpa/policies/db/ark_dpa_db_policy_list_item.py", "snippet": "class ArkDPADBPolicyListItem(ArkDPABasePolicyListItem):\n providers: Optional[List[ArkWorkspaceType]] = Field(description='Names of the database providers of the policy')\n providers_tags: List[str] = Field(description='Tags on the policy', default_factory=list)\n\n # pylint: disable=no-self-use,no-self-argument\n @validator('providers')\n def validate_platforms(cls, val):\n if val is not None:\n for plat in val:\n if ArkWorkspaceType(plat) not in [\n ArkWorkspaceType.MYSQL,\n ArkWorkspaceType.MARIADB,\n ArkWorkspaceType.POSTGRES,\n ArkWorkspaceType.MSSQL,\n ArkWorkspaceType.ORACLE,\n ]:\n raise ValueError('Invalid Database Type')\n return val" }, { "identifier": "ArkDPADB", "path": "ark_sdk_python/models/services/dpa/policies/db/ark_dpa_db_providers.py", "snippet": "class ArkDPADB(ArkCamelizedModel):\n pass" }, { "identifier": "ArkDPADBMariaDB", "path": "ark_sdk_python/models/services/dpa/policies/db/ark_dpa_db_providers.py", "snippet": "class ArkDPADBMariaDB(ArkDPADBIdentifiers):\n pass" }, { "identifier": "ArkDPADBMSSQL", "path": "ark_sdk_python/models/services/dpa/policies/db/ark_dpa_db_providers.py", "snippet": "class ArkDPADBMSSQL(ArkDPADBIdentifiers):\n pass" }, { "identifier": "ArkDPADBMySQL", "path": "ark_sdk_python/models/services/dpa/policies/db/ark_dpa_db_providers.py", "snippet": "class ArkDPADBMySQL(ArkDPADBIdentifiers):\n pass" }, { "identifier": "ArkDPADBOracle", "path": "ark_sdk_python/models/services/dpa/policies/db/ark_dpa_db_providers.py", "snippet": "class ArkDPADBOracle(ArkDPADB):\n resources: List[Union[str, ArkDPADBOracleResource]] = Field(description='List of oracle resources / assets for the policy')" }, { "identifier": "ArkDPADBOracleResource", "path": "ark_sdk_python/models/services/dpa/policies/db/ark_dpa_db_providers.py", "snippet": "class ArkDPADBOracleResource(ArkCamelizedModel):\n name: str = Field(description='Name of the oracle db resource / asset')\n services: Optional[List[str]] = Field(description='Oracle services in the 
database')" }, { "identifier": "ArkDPADBPostgres", "path": "ark_sdk_python/models/services/dpa/policies/db/ark_dpa_db_providers.py", "snippet": "class ArkDPADBPostgres(ArkDPADBIdentifiers):\n pass" }, { "identifier": "ArkDPADBProvidersData", "path": "ark_sdk_python/models/services/dpa/policies/db/ark_dpa_db_providers.py", "snippet": "class ArkDPADBProvidersData(ArkCamelizedModel):\n mssql: Optional[ArkDPADBMSSQL] = Field(description='MSSQL related resources')\n mysql: Optional[ArkDPADBMySQL] = Field(description='MySQL related resources')\n mariadb: Optional[ArkDPADBMariaDB] = Field(description='MariaDB related resources')\n postgres: Optional[ArkDPADBPostgres] = Field(description='PostgreSQL related resources')\n oracle: Optional[ArkDPADBOracle] = Field(description='Oracle related resources')\n\n @root_validator\n @classmethod\n def validate_min_providers(cls, data: Dict) -> Dict[str, Any]:\n if isinstance(data, dict):\n if all(value is None for value in data.values()):\n raise ValueError('policy should contain at least one provider')\n return data" }, { "identifier": "ArkDPADBUpdatePolicy", "path": "ark_sdk_python/models/services/dpa/policies/db/ark_dpa_db_update_policy.py", "snippet": "class ArkDPADBUpdatePolicy(ArkDPABaseUpdatePolicy):\n providers_tags: Optional[List[str]] = Field(description='Policy tags to use as filters for the assets in the rules')\n providers_data: Optional[ArkDPADBProvidersData] = Field(\n description='Policy providers data containing database assets of different types'\n )\n user_access_rules: Optional[List[ArkDPADBAuthorizationRule]] = Field(\n description='Authorization rules of the policy describing how and who can access the assets'\n )" }, { "identifier": "ArkDPADBPoliciesService", "path": "ark_sdk_python/services/dpa/policies/db/ark_dpa_db_policies_service.py", "snippet": "class ArkDPADBPoliciesService(ArkService):\n def __init__(self, isp_auth: ArkISPAuth) -> None:\n super().__init__(isp_auth)\n self.__isp_auth = isp_auth\n self.__client: ArkISPServiceClient = ArkISPServiceClient.from_isp_auth(self.__isp_auth, 'dpa')\n\n @property\n def isp_client(self) -> ArkISPServiceClient:\n return self.__client\n\n def __policy_id_by_name(self, policy_name: str) -> str:\n policies = self.list_policies_by(ArkDPADBPoliciesFilter(name=policy_name))\n if not policies:\n raise ArkServiceException(f'Failed to find db policy id by name [{policy_name}]')\n return policies[0].policy_id\n\n def add_policy(self, add_policy: ArkDPADBAddPolicy) -> ArkDPADBPolicy:\n \"\"\"\n Adds a new DB policy with the specified information.\n\n Args:\n add_policy (ArkDPADBAddPolicy): _description_\n\n Raises:\n ArkServiceException: _description_\n\n Returns:\n ArkDPADBPolicy: _description_\n \"\"\"\n self._logger.info(f'Adding new db policy [{add_policy.policy_name}]')\n add_policy_dict = add_policy.dict(by_alias=True, exclude_none=True)\n resp: Response = self.__client.post(DB_POLICIES_API, json=add_policy_dict)\n if resp.status_code == HTTPStatus.CREATED:\n try:\n policy_id = resp.json()['policyId']\n return self.policy(ArkDPAGetPolicy(policy_id=policy_id))\n except (ValidationError, JSONDecodeError, KeyError) as ex:\n self._logger.exception(f'Failed to parse add db policy response [{str(ex)}] - [{resp.text}]')\n raise ArkServiceException(f'Failed to parse add sb policy response [{str(ex)}]') from ex\n raise ArkServiceException(f'Failed to add db policy [{resp.text}] - [{resp.status_code}]')\n\n def delete_policy(self, delete_policy: ArkDPADeletePolicy) -> None:\n \"\"\"\n Deletes the 
specified (ID or name) DB policy.\n\n Args:\n delete_policy (ArkDPADeletePolicy): _description_\n\n Raises:\n ArkServiceException: _description_\n \"\"\"\n if delete_policy.policy_name and not delete_policy.policy_id:\n delete_policy.policy_id = self.__policy_id_by_name(delete_policy.policy_name)\n self._logger.info(f'Deleting db policy [{delete_policy.policy_id}]')\n resp: Response = self.__client.delete(DB_POLICY_API.format(policy_id=delete_policy.policy_id))\n if resp.status_code != HTTPStatus.NO_CONTENT:\n raise ArkServiceException(f'Failed to delete db policy [{resp.text}] - [{resp.status_code}]')\n\n def update_policy(self, update_policy: ArkDPADBUpdatePolicy) -> ArkDPADBPolicy:\n \"\"\"\n Updates a DB policy.\n\n Args:\n update_policy (ArkDPADBUpdatePolicy): _description_\n\n Raises:\n ArkServiceException: _description_\n\n Returns:\n ArkDPADBPolicy: _description_\n \"\"\"\n if update_policy.policy_name and not update_policy.policy_id:\n update_policy.policy_id = self.__policy_id_by_name(update_policy.policy_name)\n self._logger.info(f'Updating db policy [{update_policy.policy_id}]')\n update_dict = json.loads(update_policy.json(by_alias=True, exclude_none=True, exclude={'new_policy_name', 'policy_name'}))\n if update_policy.new_policy_name:\n update_dict['policyName'] = update_policy.new_policy_name\n else:\n update_dict['policyName'] = update_policy.policy_name\n resp: Response = self.__client.put(DB_POLICY_API.format(policy_id=update_policy.policy_id), json=update_dict)\n if resp.status_code == HTTPStatus.OK:\n try:\n return ArkDPADBPolicy.parse_obj(resp.json())\n except (ValidationError, JSONDecodeError) as ex:\n self._logger.exception(f'Failed to parse update db policy response [{str(ex)}] - [{resp.text}]')\n raise ArkServiceException(f'Failed to parse update db policy response [{str(ex)}]') from ex\n raise ArkServiceException(f'Failed to update db policy [{resp.text}] - [{resp.status_code}]')\n\n def update_policy_status(self, update_policy_status: ArkDPAUpdatePolicyStatus) -> ArkDPADBPolicy:\n \"\"\"\n Updates the status of the specified (by ID) DB policy.\n\n Args:\n update_policy_status (ArkDPAUpdatePolicyStatus): _description_\n\n Raises:\n ArkServiceException: _description_\n\n Returns:\n ArkDPADBPolicy: _description_\n \"\"\"\n if update_policy_status.policy_name and not update_policy_status.policy_id:\n update_policy_status.policy_id = self.__policy_id_by_name(update_policy_status.policy_name)\n self._logger.info(f'Updating db policy status [{update_policy_status.policy_id}]')\n resp: Response = self.__client.put(\n DB_UPDATE_POLICY_STATUS_API.format(policy_id=update_policy_status.policy_id),\n json=update_policy_status.dict(exclude={'policy_id'}),\n )\n if resp.status_code == HTTPStatus.OK:\n return self.policy(ArkDPAGetPolicy(policy_id=update_policy_status.policy_id))\n raise ArkServiceException(f'Failed to update db policy status [{resp.text}] - [{resp.status_code}]')\n\n def list_policies(self) -> List[ArkDPADBPolicyListItem]:\n \"\"\"\n Lists all of the tenants's DB policies.\n\n Raises:\n ArkServiceException: _description_\n\n Returns:\n List[ArkDPADBPolicyListItem]: _description_\n \"\"\"\n self._logger.info('Retrieving all db policies')\n resp: Response = self.__client.get(DB_POLICIES_API)\n if resp.status_code == HTTPStatus.OK:\n try:\n return parse_obj_as(List[ArkDPADBPolicyListItem], resp.json()['items'])\n except (ValidationError, JSONDecodeError, KeyError) as ex:\n self._logger.exception(f'Failed to parse list db policies response [{str(ex)}] - 
[{resp.text}]')\n raise ArkServiceException(f'Failed to parse list db policies response [{str(ex)}]') from ex\n raise ArkServiceException(f'Failed to list db policies [{resp.text}] - [{resp.status_code}]')\n\n def list_policies_by(self, policies_filter: ArkDPADBPoliciesFilter) -> List[ArkDPADBPolicyListItem]:\n \"\"\"\n Lists DB policies that match the specified filters.\n\n Args:\n policies_filter (ArkDPADBPoliciesFilter): _description_\n\n Returns:\n List[ArkDPADBPolicyListItem]: _description_\n \"\"\"\n self._logger.info(f'Retrieving db policies by filter [{policies_filter}]')\n policies = self.list_policies()\n\n # Filter by statuses\n if policies_filter.statuses:\n policies = [p for p in policies if p.status in policies_filter.statuses]\n\n # Filter by name wildcard\n if policies_filter.name:\n policies = [p for p in policies if fnmatch(p.policy_name, policies_filter.name)]\n\n # Filter by cloud providers\n if policies_filter.providers:\n policies = [p for p in policies if all(cp.value in p.providers for cp in policies_filter.providers)]\n\n return policies\n\n def policy(self, get_policy: ArkDPAGetPolicy) -> ArkDPADBPolicy:\n \"\"\"\n Retrieves a DB policy by ID.\n\n Args:\n get_policy (ArkDPAGetPolicy): _description_\n\n Raises:\n ArkServiceException: _description_\n\n Returns:\n ArkDPADBPolicy: _description_\n \"\"\"\n if get_policy.policy_name and not get_policy.policy_id:\n get_policy.policy_id = self.__policy_id_by_name(get_policy.policy_name)\n self._logger.info(f'Retrieving db policy [{get_policy.policy_id}]')\n resp: Response = self.__client.get(DB_POLICY_API.format(policy_id=get_policy.policy_id))\n if resp.status_code == HTTPStatus.OK:\n try:\n return ArkDPADBPolicy.parse_obj(resp.json())\n except (ValidationError, JSONDecodeError) as ex:\n self._logger.exception(f'Failed to parse db policy response [{str(ex)}] - [{resp.text}]')\n raise ArkServiceException(f'Failed to parse db policy response [{str(ex)}]') from ex\n raise ArkServiceException(f'Failed to retrieve db policy [{get_policy.policy_id}] [{resp.text}] - [{resp.status_code}]')\n\n def policies_stats(self) -> ArkDPADBPoliciesStats:\n \"\"\"\n Calculates policy statistics.\n\n Returns:\n ArkDPADBPoliciesStats: _description_\n \"\"\"\n self._logger.info('Calculating db policies stats')\n policies = self.list_policies()\n policies_stats = ArkDPADBPoliciesStats.construct()\n policies_stats.policies_count = len(policies)\n\n # Count policies per status\n status_types: Set[ArkDPARuleStatus] = {p.status for p in policies if p.status}\n policies_stats.policies_count_per_status = {st: len([p for p in policies if p.status and p.status == st]) for st in status_types}\n\n # Count policies per platforms\n policies_stats.policies_count_per_provider = {}\n for policy in policies:\n for platform in policy.providers:\n if platform not in policies_stats.policies_count_per_provider:\n policies_stats.policies_count_per_provider[platform] = 0\n policies_stats.policies_count_per_provider[platform] += 1\n\n return policies_stats\n\n @staticmethod\n @overrides\n def service_config() -> ArkServiceConfig:\n return SERVICE_CONFIG" } ]
from datetime import date, timedelta
from typing import Dict, Final, List, Optional

from overrides import overrides

from ark_sdk_python.args.ark_args_formatter import ArkInquirerRender
from ark_sdk_python.auth.ark_isp_auth import ArkISPAuth
from ark_sdk_python.cli_services.dpa.common.ark_dpa_base_policies_editor_service import ArkDPABasePoliciesEditorService
from ark_sdk_python.models.ark_profile import ArkProfile
from ark_sdk_python.models.cli_services.dpa.policies_editor.db import ArkDPADBGeneratePolicy
from ark_sdk_python.models.common import ArkWorkspaceType
from ark_sdk_python.models.services import ArkServiceConfig
from ark_sdk_python.models.services.dpa.policies.common import ArkDPADeletePolicy, ArkDPAGetPolicy, ArkDPARuleStatus, ArkDPAUserData
from ark_sdk_python.models.services.dpa.policies.db import (
    ArkDPADB,
    ArkDPADBAddPolicy,
    ArkDPADBAppliedTo,
    ArkDPADBAuthorizationRule,
    ArkDPADBBaseAuth,
    ArkDPADBConnectAs,
    ArkDPADBConnectionInformation,
    ArkDPADBLDAPAuth,
    ArkDPADBLocalDBAuth,
    ArkDPADBMariaDB,
    ArkDPADBMSSQL,
    ArkDPADBMySQL,
    ArkDPADBOracle,
    ArkDPADBOracleDBAuth,
    ArkDPADBOracleResource,
    ArkDPADBPolicy,
    ArkDPADBPolicyListItem,
    ArkDPADBPostgres,
    ArkDPADBProvidersData,
    ArkDPADBResourceIdentifierType,
    ArkDPADBUpdatePolicy,
)
from ark_sdk_python.services.dpa.policies.db.ark_dpa_db_policies_service import ArkDPADBPoliciesService
import inquirer
13,906
SERVICE_CONFIG: Final[ArkServiceConfig] = ArkServiceConfig(
    service_name='dpa-policies-db-editor', required_authenticator_names=['isp'], optional_authenticator_names=[]
)
SUPPORTED_DATABASE_TYPES: Final[List[str]] = [
    'MSSQL',
    'MySQL',
    'MariaDB',
    'Postgres',
    'Oracle',
]
DEFAULT_GENERATED_POLICY: Final[ArkDPADBPolicy] = ArkDPADBPolicy(
    policy_name='Default DB Policy',
    status=ArkDPARuleStatus.Draft,
    description='Auto generated db policy',
    providers_data=ArkDPADBProvidersData(
        postgres=ArkDPADBPostgres(
            resources=['postgres-onboarded-asset'],
        ),
    ),
    start_date=date.today().strftime('%Y-%m-%d'),
    end_date=(date.today() + timedelta(days=7)).strftime('%Y-%m-%d'),
    user_access_rules=[],
)
SERVICE_CONFIG: Final[ArkServiceConfig] = ArkServiceConfig(
    service_name='dpa-policies-db-editor', required_authenticator_names=['isp'], optional_authenticator_names=[]
)
SUPPORTED_DATABASE_TYPES: Final[List[str]] = [
    'MSSQL',
    'MySQL',
    'MariaDB',
    'Postgres',
    'Oracle',
]
DEFAULT_GENERATED_POLICY: Final[ArkDPADBPolicy] = ArkDPADBPolicy(
    policy_name='Default DB Policy',
    status=ArkDPARuleStatus.Draft,
    description='Auto generated db policy',
    providers_data=ArkDPADBProvidersData(
        postgres=ArkDPADBPostgres(
            resources=['postgres-onboarded-asset'],
        ),
    ),
    start_date=date.today().strftime('%Y-%m-%d'),
    end_date=(date.today() + timedelta(days=7)).strftime('%Y-%m-%d'),
    user_access_rules=[],
)
DEFAULT_GENERATED_PROVIDERS: Final[Dict[ArkWorkspaceType, ArkDPADB]] = {
5
2023-11-13 09:24:31+00:00
16k
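A record like the one above pairs retrieved context snippets and a truncated source file with the single line to be completed, plus the index of the snippet that line relies on. As a rough sketch only (the JSON-lines layout, key names, and helper names here are assumptions for illustration, not something this dump defines), such a record might be consumed like this:

import json


def load_record(record_path: str) -> dict:
    """Read one record from a JSON-lines file (layout and key names assumed)."""
    with open(record_path, encoding="utf-8") as fh:
        return json.loads(fh.readline())


def build_prompt(record: dict) -> str:
    """Prompt = import block plus the code preceding the held-out line."""
    return record["import_statement"] + "\n" + record["cropped_code"]


def is_exact_match(record: dict, predicted_line: str) -> bool:
    """Compare a model's proposed completion against the recorded next line."""
    gold = record["context"][record["gold_snippet_index"]]
    print(f"gold context: {gold['identifier']} from {gold['path']}")
    return predicted_line.strip() == record["next_line"].strip()

An exact-match check like this is deliberately strict; looser comparisons such as edit similarity are common alternatives when scoring completions.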
mohenghui/detectAuto_v8
ultralytics/models/sam/model.py
[ { "identifier": "Model", "path": "ultralytics/engine/model.py", "snippet": "class Model(nn.Module):\n \"\"\"\n A base class to unify APIs for all models.\n\n Args:\n model (str, Path): Path to the model file to load or create.\n task (Any, optional): Task type for the YOLO model. Defaults to None.\n\n Attributes:\n predictor (Any): The predictor object.\n model (Any): The model object.\n trainer (Any): The trainer object.\n task (str): The type of model task.\n ckpt (Any): The checkpoint object if the model loaded from *.pt file.\n cfg (str): The model configuration if loaded from *.yaml file.\n ckpt_path (str): The checkpoint file path.\n overrides (dict): Overrides for the trainer object.\n metrics (Any): The data for metrics.\n\n Methods:\n __call__(source=None, stream=False, **kwargs):\n Alias for the predict method.\n _new(cfg:str, verbose:bool=True) -> None:\n Initializes a new model and infers the task type from the model definitions.\n _load(weights:str, task:str='') -> None:\n Initializes a new model and infers the task type from the model head.\n _check_is_pytorch_model() -> None:\n Raises TypeError if the model is not a PyTorch model.\n reset() -> None:\n Resets the model modules.\n info(verbose:bool=False) -> None:\n Logs the model info.\n fuse() -> None:\n Fuses the model for faster inference.\n predict(source=None, stream=False, **kwargs) -> List[ultralytics.engine.results.Results]:\n Performs prediction using the YOLO model.\n\n Returns:\n list(ultralytics.engine.results.Results): The prediction results.\n \"\"\"\n\n def __init__(self, model: Union[str, Path] = 'yolov8n.pt', task=None) -> None:\n \"\"\"\n Initializes the YOLO model.\n\n Args:\n model (Union[str, Path], optional): Path or name of the model to load or create. Defaults to 'yolov8n.pt'.\n task (Any, optional): Task type for the YOLO model. Defaults to None.\n \"\"\"\n super().__init__()\n self.callbacks = callbacks.get_default_callbacks()\n self.predictor = None # reuse predictor\n self.model = None # model object\n self.trainer = None # trainer object\n self.ckpt = None # if loaded from *.pt\n self.cfg = None # if loaded from *.yaml\n self.ckpt_path = None\n self.overrides = {} # overrides for trainer object\n self.metrics = None # validation/training metrics\n self.session = None # HUB session\n self.task = task # task type\n model = str(model).strip() # strip spaces\n\n # Check if Ultralytics HUB model from https://hub.ultralytics.com\n if self.is_hub_model(model):\n from ultralytics.hub.session import HUBTrainingSession\n self.session = HUBTrainingSession(model)\n model = self.session.model_file\n\n # Check if Triton Server model\n elif self.is_triton_model(model):\n self.model = model\n self.task = task\n return\n\n # Load or create new YOLO model\n model = checks.check_model_file_from_stem(model) # add suffix, i.e. yolov8n -> yolov8n.pt\n if Path(model).suffix in ('.yaml', '.yml'):\n self._new(model, task)\n else:\n self._load(model, task)\n\n def __call__(self, source=None, stream=False, **kwargs):\n \"\"\"Calls the 'predict' function with given arguments to perform object detection.\"\"\"\n return self.predict(source, stream, **kwargs)\n\n @staticmethod\n def is_triton_model(model):\n \"\"\"Is model a Triton Server URL string, i.e. 
<scheme>://<netloc>/<endpoint>/<task_name>\"\"\"\n from urllib.parse import urlsplit\n url = urlsplit(model)\n return url.netloc and url.path and url.scheme in {'http', 'grfc'}\n\n @staticmethod\n def is_hub_model(model):\n \"\"\"Check if the provided model is a HUB model.\"\"\"\n return any((\n model.startswith(f'{HUB_WEB_ROOT}/models/'), # i.e. https://hub.ultralytics.com/models/MODEL_ID\n [len(x) for x in model.split('_')] == [42, 20], # APIKEY_MODELID\n len(model) == 20 and not Path(model).exists() and all(x not in model for x in './\\\\'))) # MODELID\n\n def _new(self, cfg: str, task=None, model=None, verbose=True):\n \"\"\"\n Initializes a new model and infers the task type from the model definitions.\n\n Args:\n cfg (str): model configuration file\n task (str | None): model task\n model (BaseModel): Customized model.\n verbose (bool): display model info on load\n \"\"\"\n cfg_dict = yaml_model_load(cfg)\n self.cfg = cfg\n self.task = task or guess_model_task(cfg_dict)\n self.model = (model or self._smart_load('model'))(cfg_dict, verbose=verbose and RANK == -1) # build model\n self.overrides['model'] = self.cfg\n self.overrides['task'] = self.task\n\n # Below added to allow export from YAMLs\n self.model.args = {**DEFAULT_CFG_DICT, **self.overrides} # combine default and model args (prefer model args)\n self.model.task = self.task\n\n def _load(self, weights: str, task=None):\n \"\"\"\n Initializes a new model and infers the task type from the model head.\n\n Args:\n weights (str): model checkpoint to be loaded\n task (str | None): model task\n \"\"\"\n suffix = Path(weights).suffix\n if suffix == '.pt':\n self.model, self.ckpt = attempt_load_one_weight(weights)\n self.task = self.model.args['task']\n self.overrides = self.model.args = self._reset_ckpt_args(self.model.args)\n self.ckpt_path = self.model.pt_path\n else:\n weights = checks.check_file(weights)\n self.model, self.ckpt = weights, None\n self.task = task or guess_model_task(weights)\n self.ckpt_path = weights\n self.overrides['model'] = weights\n self.overrides['task'] = self.task\n\n def _check_is_pytorch_model(self):\n \"\"\"Raises TypeError is model is not a PyTorch model.\"\"\"\n pt_str = isinstance(self.model, (str, Path)) and Path(self.model).suffix == '.pt'\n pt_module = isinstance(self.model, nn.Module)\n if not (pt_module or pt_str):\n raise TypeError(\n f\"model='{self.model}' should be a *.pt PyTorch model to run this method, but is a different format. \"\n f\"PyTorch models can train, val, predict and export, i.e. 'model.train(data=...)', but exported \"\n f\"formats like ONNX, TensorRT etc. only support 'predict' and 'val' modes, \"\n f\"i.e. 'yolo predict model=yolov8n.onnx'.\\nTo run CUDA or MPS inference please pass the device \"\n f\"argument directly in your inference command, i.e. 
'model.predict(source=..., device=0)'\")\n\n def reset_weights(self):\n \"\"\"Resets the model modules parameters to randomly initialized values, losing all training information.\"\"\"\n self._check_is_pytorch_model()\n for m in self.model.modules():\n if hasattr(m, 'reset_parameters'):\n m.reset_parameters()\n for p in self.model.parameters():\n p.requires_grad = True\n return self\n\n def load(self, weights='yolov8n.pt'):\n \"\"\"Transfers parameters with matching names and shapes from 'weights' to model.\"\"\"\n self._check_is_pytorch_model()\n if isinstance(weights, (str, Path)):\n weights, self.ckpt = attempt_load_one_weight(weights)\n self.model.load(weights)\n return self\n\n def info(self, detailed=False, verbose=True):\n \"\"\"\n Logs model info.\n\n Args:\n detailed (bool): Show detailed information about model.\n verbose (bool): Controls verbosity.\n \"\"\"\n self._check_is_pytorch_model()\n return self.model.info(detailed=detailed, verbose=verbose)\n\n def fuse(self):\n \"\"\"Fuse PyTorch Conv2d and BatchNorm2d layers.\"\"\"\n self._check_is_pytorch_model()\n self.model.fuse()\n\n def predict(self, source=None, stream=False, predictor=None, **kwargs):\n \"\"\"\n Perform prediction using the YOLO model.\n\n Args:\n source (str | int | PIL | np.ndarray): The source of the image to make predictions on.\n Accepts all source types accepted by the YOLO model.\n stream (bool): Whether to stream the predictions or not. Defaults to False.\n predictor (BasePredictor): Customized predictor.\n **kwargs : Additional keyword arguments passed to the predictor.\n Check the 'configuration' section in the documentation for all available options.\n\n Returns:\n (List[ultralytics.engine.results.Results]): The prediction results.\n \"\"\"\n if source is None:\n source = ASSETS\n LOGGER.warning(f\"WARNING ⚠️ 'source' is missing. Using 'source={source}'.\")\n\n is_cli = (sys.argv[0].endswith('yolo') or sys.argv[0].endswith('ultralytics')) and any(\n x in sys.argv for x in ('predict', 'track', 'mode=predict', 'mode=track'))\n\n custom = {'conf': 0.25, 'save': is_cli} # method defaults\n args = {**self.overrides, **custom, **kwargs, 'mode': 'predict'} # highest priority args on the right\n prompts = args.pop('prompts', None) # for SAM-type models\n\n if not self.predictor:\n self.predictor = (predictor or self._smart_load('predictor'))(overrides=args, _callbacks=self.callbacks)\n self.predictor.setup_model(model=self.model, verbose=is_cli)\n else: # only update args if predictor is already setup\n self.predictor.args = get_cfg(self.predictor.args, args)\n if 'project' in args or 'name' in args:\n self.predictor.save_dir = get_save_dir(self.predictor.args)\n if prompts and hasattr(self.predictor, 'set_prompts'): # for SAM-type models\n self.predictor.set_prompts(prompts)\n return self.predictor.predict_cli(source=source) if is_cli else self.predictor(source=source, stream=stream)\n\n def track(self, source=None, stream=False, persist=False, **kwargs):\n \"\"\"\n Perform object tracking on the input source using the registered trackers.\n\n Args:\n source (str, optional): The input source for object tracking. Can be a file path or a video stream.\n stream (bool, optional): Whether the input source is a video stream. Defaults to False.\n persist (bool, optional): Whether to persist the trackers if they already exist. 
Defaults to False.\n **kwargs (optional): Additional keyword arguments for the tracking process.\n\n Returns:\n (List[ultralytics.engine.results.Results]): The tracking results.\n \"\"\"\n if not hasattr(self.predictor, 'trackers'):\n from ultralytics.trackers import register_tracker\n register_tracker(self, persist)\n kwargs['conf'] = kwargs.get('conf') or 0.1 # ByteTrack-based method needs low confidence predictions as input\n kwargs['mode'] = 'track'\n return self.predict(source=source, stream=stream, **kwargs)\n\n def val(self, validator=None, **kwargs):\n \"\"\"\n Validate a model on a given dataset.\n\n Args:\n validator (BaseValidator): Customized validator.\n **kwargs : Any other args accepted by the validators. To see all args check 'configuration' section in docs\n \"\"\"\n custom = {'rect': True} # method defaults\n args = {**self.overrides, **custom, **kwargs, 'mode': 'val'} # highest priority args on the right\n\n validator = (validator or self._smart_load('validator'))(args=args, _callbacks=self.callbacks)\n validator(model=self.model)\n self.metrics = validator.metrics\n return validator.metrics\n\n def benchmark(self, **kwargs):\n \"\"\"\n Benchmark a model on all export formats.\n\n Args:\n **kwargs : Any other args accepted by the validators. To see all args check 'configuration' section in docs\n \"\"\"\n self._check_is_pytorch_model()\n from ultralytics.utils.benchmarks import benchmark\n\n custom = {'verbose': False} # method defaults\n args = {**DEFAULT_CFG_DICT, **self.model.args, **custom, **kwargs, 'mode': 'benchmark'}\n return benchmark(\n model=self,\n data=kwargs.get('data'), # if no 'data' argument passed set data=None for default datasets\n imgsz=args['imgsz'],\n half=args['half'],\n int8=args['int8'],\n device=args['device'],\n verbose=kwargs.get('verbose'))\n\n def export(self, **kwargs):\n \"\"\"\n Export model.\n\n Args:\n **kwargs : Any other args accepted by the Exporter. 
To see all args check 'configuration' section in docs.\n \"\"\"\n self._check_is_pytorch_model()\n from .exporter import Exporter\n\n custom = {'imgsz': self.model.args['imgsz'], 'batch': 1, 'data': None, 'verbose': False} # method defaults\n args = {**self.overrides, **custom, **kwargs, 'mode': 'export'} # highest priority args on the right\n return Exporter(overrides=args, _callbacks=self.callbacks)(model=self.model)\n\n def train(self, trainer=None, **kwargs):\n \"\"\"\n Trains the model on a given dataset.\n\n Args:\n trainer (BaseTrainer, optional): Customized trainer.\n **kwargs (Any): Any number of arguments representing the training configuration.\n \"\"\"\n self._check_is_pytorch_model()\n if self.session: # Ultralytics HUB session\n if any(kwargs):\n LOGGER.warning('WARNING ⚠️ using HUB training arguments, ignoring local training arguments.')\n kwargs = self.session.train_args\n checks.check_pip_update_available()\n\n overrides = yaml_load(checks.check_yaml(kwargs['cfg'])) if kwargs.get('cfg') else self.overrides\n custom = {'data': TASK2DATA[self.task]} # method defaults\n args = {**overrides, **custom, **kwargs, 'mode': 'train'} # highest priority args on the right\n if args.get('resume'):\n args['resume'] = self.ckpt_path\n\n self.trainer = (trainer or self._smart_load('trainer'))(overrides=args, _callbacks=self.callbacks)\n if not args.get('resume'): # manually set model only if not resuming\n self.trainer.model = self.trainer.get_model(weights=self.model if self.ckpt else None, cfg=self.model.yaml)\n self.model = self.trainer.model\n self.trainer.hub_session = self.session # attach optional HUB session\n self.trainer.train()\n # Update model and cfg after training\n if RANK in (-1, 0):\n ckpt = self.trainer.best if self.trainer.best.exists() else self.trainer.last\n self.model, _ = attempt_load_one_weight(ckpt)\n self.overrides = self.model.args\n self.metrics = getattr(self.trainer.validator, 'metrics', None) # TODO: no metrics returned by DDP\n return self.metrics\n\n def tune(self, use_ray=False, iterations=10, *args, **kwargs):\n \"\"\"\n Runs hyperparameter tuning, optionally using Ray Tune. See ultralytics.utils.tuner.run_ray_tune for Args.\n\n Returns:\n (dict): A dictionary containing the results of the hyperparameter search.\n \"\"\"\n self._check_is_pytorch_model()\n if use_ray:\n from ultralytics.utils.tuner import run_ray_tune\n return run_ray_tune(self, max_samples=iterations, *args, **kwargs)\n else:\n from .tuner import Tuner\n\n custom = {} # method defaults\n args = {**self.overrides, **custom, **kwargs, 'mode': 'train'} # highest priority args on the right\n return Tuner(args=args, _callbacks=self.callbacks)(model=self, iterations=iterations)\n\n def _apply(self, fn):\n \"\"\"Apply to(), cpu(), cuda(), half(), float() to model tensors that are not parameters or registered buffers.\"\"\"\n self._check_is_pytorch_model()\n self = super()._apply(fn) # noqa\n self.predictor = None # reset predictor as device may have changed\n self.overrides['device'] = self.device # was str(self.device) i.e. 
device(type='cuda', index=0) -> 'cuda:0'\n return self\n\n @property\n def names(self):\n \"\"\"Returns class names of the loaded model.\"\"\"\n return self.model.names if hasattr(self.model, 'names') else None\n\n @property\n def device(self):\n \"\"\"Returns device if PyTorch model.\"\"\"\n return next(self.model.parameters()).device if isinstance(self.model, nn.Module) else None\n\n @property\n def transforms(self):\n \"\"\"Returns transform of the loaded model.\"\"\"\n return self.model.transforms if hasattr(self.model, 'transforms') else None\n\n def add_callback(self, event: str, func):\n \"\"\"Add a callback.\"\"\"\n self.callbacks[event].append(func)\n\n def clear_callback(self, event: str):\n \"\"\"Clear all event callbacks.\"\"\"\n self.callbacks[event] = []\n\n def reset_callbacks(self):\n \"\"\"Reset all registered callbacks.\"\"\"\n for event in callbacks.default_callbacks.keys():\n self.callbacks[event] = [callbacks.default_callbacks[event][0]]\n\n @staticmethod\n def _reset_ckpt_args(args):\n \"\"\"Reset arguments when loading a PyTorch model.\"\"\"\n include = {'imgsz', 'data', 'task', 'single_cls'} # only remember these arguments when loading a PyTorch model\n return {k: v for k, v in args.items() if k in include}\n\n # def __getattr__(self, attr):\n # \"\"\"Raises error if object has no requested attribute.\"\"\"\n # name = self.__class__.__name__\n # raise AttributeError(f\"'{name}' object has no attribute '{attr}'. See valid attributes below.\\n{self.__doc__}\")\n\n def _smart_load(self, key):\n \"\"\"Load model/trainer/validator/predictor.\"\"\"\n try:\n return self.task_map[self.task][key]\n except Exception as e:\n name = self.__class__.__name__\n mode = inspect.stack()[1][3] # get the function name.\n raise NotImplementedError(\n emojis(f\"WARNING ⚠️ '{name}' model does not support '{mode}' mode for '{self.task}' task yet.\")) from e\n\n @property\n def task_map(self):\n \"\"\"\n Map head to model, trainer, validator, and predictor classes.\n\n Returns:\n task_map (dict): The map of model task to mode classes.\n \"\"\"\n raise NotImplementedError('Please provide task map for your model!')" }, { "identifier": "model_info", "path": "ultralytics/utils/torch_utils.py", "snippet": "def model_info(model, detailed=False, verbose=True, imgsz=640):\n \"\"\"\n Model information.\n\n imgsz may be int or list, i.e. 
imgsz=640 or imgsz=[640, 320].\n \"\"\"\n if not verbose:\n return\n n_p = get_num_params(model) # number of parameters\n n_g = get_num_gradients(model) # number of gradients\n n_l = len(list(model.modules())) # number of layers\n if detailed:\n LOGGER.info(\n f\"{'layer':>5} {'name':>40} {'gradient':>9} {'parameters':>12} {'shape':>20} {'mu':>10} {'sigma':>10}\")\n for i, (name, p) in enumerate(model.named_parameters()):\n name = name.replace('module_list.', '')\n LOGGER.info('%5g %40s %9s %12g %20s %10.3g %10.3g %10s' %\n (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std(), p.dtype))\n\n flops = get_flops(model, imgsz)\n fused = ' (fused)' if getattr(model, 'is_fused', lambda: False)() else ''\n fs = f', {flops:.1f} GFLOPs' if flops else ''\n yaml_file = getattr(model, 'yaml_file', '') or getattr(model, 'yaml', {}).get('yaml_file', '')\n model_name = Path(yaml_file).stem.replace('yolo', 'YOLO') or 'Model'\n LOGGER.info(f'{model_name} summary{fused}: {n_l} layers, {n_p} parameters, {n_g} gradients{fs}')\n return n_l, n_p, n_g, flops" }, { "identifier": "build_sam", "path": "ultralytics/models/sam/build.py", "snippet": "def build_sam(ckpt='sam_b.pt'):\n \"\"\"Build a SAM model specified by ckpt.\"\"\"\n model_builder = None\n ckpt = str(ckpt) # to allow Path ckpt types\n for k in sam_model_map.keys():\n if ckpt.endswith(k):\n model_builder = sam_model_map.get(k)\n\n if not model_builder:\n raise FileNotFoundError(f'{ckpt} is not a supported SAM model. Available models are: \\n {sam_model_map.keys()}')\n\n return model_builder(ckpt)" }, { "identifier": "Predictor", "path": "ultralytics/models/sam/predict.py", "snippet": "class Predictor(BasePredictor):\n \"\"\"\n Predictor class for the Segment Anything Model (SAM), extending BasePredictor.\n\n The class provides an interface for model inference tailored to image segmentation tasks.\n With advanced architecture and promptable segmentation capabilities, it facilitates flexible and real-time\n mask generation. The class is capable of working with various types of prompts such as bounding boxes,\n points, and low-resolution masks.\n\n Attributes:\n cfg (dict): Configuration dictionary specifying model and task-related parameters.\n overrides (dict): Dictionary containing values that override the default configuration.\n _callbacks (dict): Dictionary of user-defined callback functions to augment behavior.\n args (namespace): Namespace to hold command-line arguments or other operational variables.\n im (torch.Tensor): Preprocessed input image tensor.\n features (torch.Tensor): Extracted image features used for inference.\n prompts (dict): Collection of various prompt types, such as bounding boxes and points.\n segment_all (bool): Flag to control whether to segment all objects in the image or only specified ones.\n \"\"\"\n\n def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None):\n \"\"\"\n Initialize the Predictor with configuration, overrides, and callbacks.\n\n The method sets up the Predictor object and applies any configuration overrides or callbacks provided. 
It\n initializes task-specific settings for SAM, such as retina_masks being set to True for optimal results.\n\n Args:\n cfg (dict): Configuration dictionary.\n overrides (dict, optional): Dictionary of values to override default configuration.\n _callbacks (dict, optional): Dictionary of callback functions to customize behavior.\n \"\"\"\n if overrides is None:\n overrides = {}\n overrides.update(dict(task='segment', mode='predict', imgsz=1024))\n super().__init__(cfg, overrides, _callbacks)\n self.args.retina_masks = True\n self.im = None\n self.features = None\n self.prompts = {}\n self.segment_all = False\n\n def preprocess(self, im):\n \"\"\"\n Preprocess the input image for model inference.\n\n The method prepares the input image by applying transformations and normalization.\n It supports both torch.Tensor and list of np.ndarray as input formats.\n\n Args:\n im (torch.Tensor | List[np.ndarray]): BCHW tensor format or list of HWC numpy arrays.\n\n Returns:\n (torch.Tensor): The preprocessed image tensor.\n \"\"\"\n if self.im is not None:\n return self.im\n not_tensor = not isinstance(im, torch.Tensor)\n if not_tensor:\n im = np.stack(self.pre_transform(im))\n im = im[..., ::-1].transpose((0, 3, 1, 2))\n im = np.ascontiguousarray(im)\n im = torch.from_numpy(im)\n\n im = im.to(self.device)\n im = im.half() if self.model.fp16 else im.float()\n if not_tensor:\n im = (im - self.mean) / self.std\n return im\n\n def pre_transform(self, im):\n \"\"\"\n Perform initial transformations on the input image for preprocessing.\n\n The method applies transformations such as resizing to prepare the image for further preprocessing.\n Currently, batched inference is not supported; hence the list length should be 1.\n\n Args:\n im (List[np.ndarray]): List containing images in HWC numpy array format.\n\n Returns:\n (List[np.ndarray]): List of transformed images.\n \"\"\"\n assert len(im) == 1, 'SAM model does not currently support batched inference'\n letterbox = LetterBox(self.args.imgsz, auto=False, center=False)\n return [letterbox(image=x) for x in im]\n\n def inference(self, im, bboxes=None, points=None, labels=None, masks=None, multimask_output=False, *args, **kwargs):\n \"\"\"\n Perform image segmentation inference based on the given input cues, using the currently loaded image. This\n method leverages SAM's (Segment Anything Model) architecture consisting of image encoder, prompt encoder, and\n mask decoder for real-time and promptable segmentation tasks.\n\n Args:\n im (torch.Tensor): The preprocessed input image in tensor format, with shape (N, C, H, W).\n bboxes (np.ndarray | List, optional): Bounding boxes with shape (N, 4), in XYXY format.\n points (np.ndarray | List, optional): Points indicating object locations with shape (N, 2), in pixel coordinates.\n labels (np.ndarray | List, optional): Labels for point prompts, shape (N, ). 1 for foreground and 0 for background.\n masks (np.ndarray, optional): Low-resolution masks from previous predictions. Shape should be (N, H, W). For SAM, H=W=256.\n multimask_output (bool, optional): Flag to return multiple masks. Helpful for ambiguous prompts. 
Defaults to False.\n\n Returns:\n (tuple): Contains the following three elements.\n - np.ndarray: The output masks in shape CxHxW, where C is the number of generated masks.\n - np.ndarray: An array of length C containing quality scores predicted by the model for each mask.\n - np.ndarray: Low-resolution logits of shape CxHxW for subsequent inference, where H=W=256.\n \"\"\"\n # Override prompts if any stored in self.prompts\n bboxes = self.prompts.pop('bboxes', bboxes)\n points = self.prompts.pop('points', points)\n masks = self.prompts.pop('masks', masks)\n\n if all(i is None for i in [bboxes, points, masks]):\n return self.generate(im, *args, **kwargs)\n\n return self.prompt_inference(im, bboxes, points, labels, masks, multimask_output)\n\n def prompt_inference(self, im, bboxes=None, points=None, labels=None, masks=None, multimask_output=False):\n \"\"\"\n Internal function for image segmentation inference based on cues like bounding boxes, points, and masks.\n Leverages SAM's specialized architecture for prompt-based, real-time segmentation.\n\n Args:\n im (torch.Tensor): The preprocessed input image in tensor format, with shape (N, C, H, W).\n bboxes (np.ndarray | List, optional): Bounding boxes with shape (N, 4), in XYXY format.\n points (np.ndarray | List, optional): Points indicating object locations with shape (N, 2), in pixel coordinates.\n labels (np.ndarray | List, optional): Labels for point prompts, shape (N, ). 1 for foreground and 0 for background.\n masks (np.ndarray, optional): Low-resolution masks from previous predictions. Shape should be (N, H, W). For SAM, H=W=256.\n multimask_output (bool, optional): Flag to return multiple masks. Helpful for ambiguous prompts. Defaults to False.\n\n Returns:\n (tuple): Contains the following three elements.\n - np.ndarray: The output masks in shape CxHxW, where C is the number of generated masks.\n - np.ndarray: An array of length C containing quality scores predicted by the model for each mask.\n - np.ndarray: Low-resolution logits of shape CxHxW for subsequent inference, where H=W=256.\n \"\"\"\n features = self.model.image_encoder(im) if self.features is None else self.features\n\n src_shape, dst_shape = self.batch[1][0].shape[:2], im.shape[2:]\n r = 1.0 if self.segment_all else min(dst_shape[0] / src_shape[0], dst_shape[1] / src_shape[1])\n # Transform input prompts\n if points is not None:\n points = torch.as_tensor(points, dtype=torch.float32, device=self.device)\n points = points[None] if points.ndim == 1 else points\n # Assuming labels are all positive if users don't pass labels.\n if labels is None:\n labels = np.ones(points.shape[0])\n labels = torch.as_tensor(labels, dtype=torch.int32, device=self.device)\n points *= r\n # (N, 2) --> (N, 1, 2), (N, ) --> (N, 1)\n points, labels = points[:, None, :], labels[:, None]\n if bboxes is not None:\n bboxes = torch.as_tensor(bboxes, dtype=torch.float32, device=self.device)\n bboxes = bboxes[None] if bboxes.ndim == 1 else bboxes\n bboxes *= r\n if masks is not None:\n masks = torch.as_tensor(masks, dtype=torch.float32, device=self.device).unsqueeze(1)\n\n points = (points, labels) if points is not None else None\n # Embed prompts\n sparse_embeddings, dense_embeddings = self.model.prompt_encoder(points=points, boxes=bboxes, masks=masks)\n\n # Predict masks\n pred_masks, pred_scores = self.model.mask_decoder(\n image_embeddings=features,\n image_pe=self.model.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n 
multimask_output=multimask_output,\n )\n\n # (N, d, H, W) --> (N*d, H, W), (N, d) --> (N*d, )\n # `d` could be 1 or 3 depends on `multimask_output`.\n return pred_masks.flatten(0, 1), pred_scores.flatten(0, 1)\n\n def generate(self,\n im,\n crop_n_layers=0,\n crop_overlap_ratio=512 / 1500,\n crop_downscale_factor=1,\n point_grids=None,\n points_stride=32,\n points_batch_size=64,\n conf_thres=0.88,\n stability_score_thresh=0.95,\n stability_score_offset=0.95,\n crop_nms_thresh=0.7):\n \"\"\"\n Perform image segmentation using the Segment Anything Model (SAM).\n\n This function segments an entire image into constituent parts by leveraging SAM's advanced architecture\n and real-time performance capabilities. It can optionally work on image crops for finer segmentation.\n\n Args:\n im (torch.Tensor): Input tensor representing the preprocessed image with dimensions (N, C, H, W).\n crop_n_layers (int): Specifies the number of layers for additional mask predictions on image crops.\n Each layer produces 2**i_layer number of image crops.\n crop_overlap_ratio (float): Determines the extent of overlap between crops. Scaled down in subsequent layers.\n crop_downscale_factor (int): Scaling factor for the number of sampled points-per-side in each layer.\n point_grids (list[np.ndarray], optional): Custom grids for point sampling normalized to [0,1].\n Used in the nth crop layer.\n points_stride (int, optional): Number of points to sample along each side of the image.\n Exclusive with 'point_grids'.\n points_batch_size (int): Batch size for the number of points processed simultaneously.\n conf_thres (float): Confidence threshold [0,1] for filtering based on the model's mask quality prediction.\n stability_score_thresh (float): Stability threshold [0,1] for mask filtering based on mask stability.\n stability_score_offset (float): Offset value for calculating stability score.\n crop_nms_thresh (float): IoU cutoff for Non-Maximum Suppression (NMS) to remove duplicate masks between crops.\n\n Returns:\n (tuple): A tuple containing segmented masks, confidence scores, and bounding boxes.\n \"\"\"\n self.segment_all = True\n ih, iw = im.shape[2:]\n crop_regions, layer_idxs = generate_crop_boxes((ih, iw), crop_n_layers, crop_overlap_ratio)\n if point_grids is None:\n point_grids = build_all_layer_point_grids(points_stride, crop_n_layers, crop_downscale_factor)\n pred_masks, pred_scores, pred_bboxes, region_areas = [], [], [], []\n for crop_region, layer_idx in zip(crop_regions, layer_idxs):\n x1, y1, x2, y2 = crop_region\n w, h = x2 - x1, y2 - y1\n area = torch.tensor(w * h, device=im.device)\n points_scale = np.array([[w, h]]) # w, h\n # Crop image and interpolate to input size\n crop_im = F.interpolate(im[..., y1:y2, x1:x2], (ih, iw), mode='bilinear', align_corners=False)\n # (num_points, 2)\n points_for_image = point_grids[layer_idx] * points_scale\n crop_masks, crop_scores, crop_bboxes = [], [], []\n for (points, ) in batch_iterator(points_batch_size, points_for_image):\n pred_mask, pred_score = self.prompt_inference(crop_im, points=points, multimask_output=True)\n # Interpolate predicted masks to input size\n pred_mask = F.interpolate(pred_mask[None], (h, w), mode='bilinear', align_corners=False)[0]\n idx = pred_score > conf_thres\n pred_mask, pred_score = pred_mask[idx], pred_score[idx]\n\n stability_score = calculate_stability_score(pred_mask, self.model.mask_threshold,\n stability_score_offset)\n idx = stability_score > stability_score_thresh\n pred_mask, pred_score = pred_mask[idx], pred_score[idx]\n # 
Bool type is much more memory-efficient.\n pred_mask = pred_mask > self.model.mask_threshold\n # (N, 4)\n pred_bbox = batched_mask_to_box(pred_mask).float()\n keep_mask = ~is_box_near_crop_edge(pred_bbox, crop_region, [0, 0, iw, ih])\n if not torch.all(keep_mask):\n pred_bbox, pred_mask, pred_score = pred_bbox[keep_mask], pred_mask[keep_mask], pred_score[keep_mask]\n\n crop_masks.append(pred_mask)\n crop_bboxes.append(pred_bbox)\n crop_scores.append(pred_score)\n\n # Do nms within this crop\n crop_masks = torch.cat(crop_masks)\n crop_bboxes = torch.cat(crop_bboxes)\n crop_scores = torch.cat(crop_scores)\n keep = torchvision.ops.nms(crop_bboxes, crop_scores, self.args.iou) # NMS\n crop_bboxes = uncrop_boxes_xyxy(crop_bboxes[keep], crop_region)\n crop_masks = uncrop_masks(crop_masks[keep], crop_region, ih, iw)\n crop_scores = crop_scores[keep]\n\n pred_masks.append(crop_masks)\n pred_bboxes.append(crop_bboxes)\n pred_scores.append(crop_scores)\n region_areas.append(area.expand(len(crop_masks)))\n\n pred_masks = torch.cat(pred_masks)\n pred_bboxes = torch.cat(pred_bboxes)\n pred_scores = torch.cat(pred_scores)\n region_areas = torch.cat(region_areas)\n\n # Remove duplicate masks between crops\n if len(crop_regions) > 1:\n scores = 1 / region_areas\n keep = torchvision.ops.nms(pred_bboxes, scores, crop_nms_thresh)\n pred_masks, pred_bboxes, pred_scores = pred_masks[keep], pred_bboxes[keep], pred_scores[keep]\n\n return pred_masks, pred_scores, pred_bboxes\n\n def setup_model(self, model, verbose=True):\n \"\"\"\n Initializes the Segment Anything Model (SAM) for inference.\n\n This method sets up the SAM model by allocating it to the appropriate device and initializing the necessary\n parameters for image normalization and other Ultralytics compatibility settings.\n\n Args:\n model (torch.nn.Module): A pre-trained SAM model. If None, a model will be built based on configuration.\n verbose (bool): If True, prints selected device information.\n\n Attributes:\n model (torch.nn.Module): The SAM model allocated to the chosen device for inference.\n device (torch.device): The device to which the model and tensors are allocated.\n mean (torch.Tensor): The mean values for image normalization.\n std (torch.Tensor): The standard deviation values for image normalization.\n \"\"\"\n device = select_device(self.args.device, verbose=verbose)\n if model is None:\n model = build_sam(self.args.model)\n model.eval()\n self.model = model.to(device)\n self.device = device\n self.mean = torch.tensor([123.675, 116.28, 103.53]).view(-1, 1, 1).to(device)\n self.std = torch.tensor([58.395, 57.12, 57.375]).view(-1, 1, 1).to(device)\n\n # Ultralytics compatibility settings\n self.model.pt = False\n self.model.triton = False\n self.model.stride = 32\n self.model.fp16 = False\n self.done_warmup = True\n\n def postprocess(self, preds, img, orig_imgs):\n \"\"\"\n Post-processes SAM's inference outputs to generate object detection masks and bounding boxes.\n\n The method scales masks and boxes to the original image size and applies a threshold to the mask predictions. 
The\n SAM model uses advanced architecture and promptable segmentation tasks to achieve real-time performance.\n\n Args:\n preds (tuple): The output from SAM model inference, containing masks, scores, and optional bounding boxes.\n img (torch.Tensor): The processed input image tensor.\n orig_imgs (list | torch.Tensor): The original, unprocessed images.\n\n Returns:\n (list): List of Results objects containing detection masks, bounding boxes, and other metadata.\n \"\"\"\n # (N, 1, H, W), (N, 1)\n pred_masks, pred_scores = preds[:2]\n pred_bboxes = preds[2] if self.segment_all else None\n names = dict(enumerate(str(i) for i in range(len(pred_masks))))\n\n if not isinstance(orig_imgs, list): # input images are a torch.Tensor, not a list\n orig_imgs = ops.convert_torch2numpy_batch(orig_imgs)\n\n results = []\n for i, masks in enumerate([pred_masks]):\n orig_img = orig_imgs[i]\n if pred_bboxes is not None:\n pred_bboxes = ops.scale_boxes(img.shape[2:], pred_bboxes.float(), orig_img.shape, padding=False)\n cls = torch.arange(len(pred_masks), dtype=torch.int32, device=pred_masks.device)\n pred_bboxes = torch.cat([pred_bboxes, pred_scores[:, None], cls[:, None]], dim=-1)\n\n masks = ops.scale_masks(masks[None].float(), orig_img.shape[:2], padding=False)[0]\n masks = masks > self.model.mask_threshold # to bool\n img_path = self.batch[0][i]\n results.append(Results(orig_img, path=img_path, names=names, masks=masks, boxes=pred_bboxes))\n # Reset segment-all mode.\n self.segment_all = False\n return results\n\n def setup_source(self, source):\n \"\"\"\n Sets up the data source for inference.\n\n This method configures the data source from which images will be fetched for inference. The source could be a\n directory, a video file, or other types of image data sources.\n\n Args:\n source (str | Path): The path to the image data source for inference.\n \"\"\"\n if source is not None:\n super().setup_source(source)\n\n def set_image(self, image):\n \"\"\"\n Preprocesses and sets a single image for inference.\n\n This function sets up the model if not already initialized, configures the data source to the specified image,\n and preprocesses the image for feature extraction. Only one image can be set at a time.\n\n Args:\n image (str | np.ndarray): Image file path as a string, or a np.ndarray image read by cv2.\n\n Raises:\n AssertionError: If more than one image is set.\n \"\"\"\n if self.model is None:\n model = build_sam(self.args.model)\n self.setup_model(model)\n self.setup_source(image)\n assert len(self.dataset) == 1, '`set_image` only supports setting one image!'\n for batch in self.dataset:\n im = self.preprocess(batch[1])\n self.features = self.model.image_encoder(im)\n self.im = im\n break\n\n def set_prompts(self, prompts):\n \"\"\"Set prompts in advance.\"\"\"\n self.prompts = prompts\n\n def reset_image(self):\n \"\"\"Resets the image and its features to None.\"\"\"\n self.im = None\n self.features = None\n\n @staticmethod\n def remove_small_regions(masks, min_area=0, nms_thresh=0.7):\n \"\"\"\n Perform post-processing on segmentation masks generated by the Segment Anything Model (SAM). Specifically, this\n function removes small disconnected regions and holes from the input masks, and then performs Non-Maximum\n Suppression (NMS) to eliminate any newly created duplicate boxes.\n\n Args:\n masks (torch.Tensor): A tensor containing the masks to be processed. 
Shape should be (N, H, W), where N is\n the number of masks, H is height, and W is width.\n min_area (int): The minimum area below which disconnected regions and holes will be removed. Defaults to 0.\n nms_thresh (float): The IoU threshold for the NMS algorithm. Defaults to 0.7.\n\n Returns:\n (tuple([torch.Tensor, List[int]])):\n - new_masks (torch.Tensor): The processed masks with small regions removed. Shape is (N, H, W).\n - keep (List[int]): The indices of the remaining masks post-NMS, which can be used to filter the boxes.\n \"\"\"\n if len(masks) == 0:\n return masks\n\n # Filter small disconnected regions and holes\n new_masks = []\n scores = []\n for mask in masks:\n mask = mask.cpu().numpy().astype(np.uint8)\n mask, changed = remove_small_regions(mask, min_area, mode='holes')\n unchanged = not changed\n mask, changed = remove_small_regions(mask, min_area, mode='islands')\n unchanged = unchanged and not changed\n\n new_masks.append(torch.as_tensor(mask).unsqueeze(0))\n # Give score=0 to changed masks and 1 to unchanged masks so NMS prefers masks not needing postprocessing\n scores.append(float(unchanged))\n\n # Recalculate boxes and remove any new duplicates\n new_masks = torch.cat(new_masks, dim=0)\n boxes = batched_mask_to_box(new_masks)\n keep = torchvision.ops.nms(boxes.float(), torch.as_tensor(scores), nms_thresh)\n\n return new_masks[keep].to(device=masks.device, dtype=masks.dtype), keep" } ]
from pathlib import Path from ultralytics.engine.model import Model from ultralytics.utils.torch_utils import model_info from .build import build_sam from .predict import Predictor
11,471
# Ultralytics YOLO 🚀, AGPL-3.0 license """ SAM model interface. This module provides an interface to the Segment Anything Model (SAM) from Ultralytics, designed for real-time image segmentation tasks. The SAM model allows for promptable segmentation with unparalleled versatility in image analysis, and has been trained on the SA-1B dataset. It features zero-shot performance capabilities, enabling it to adapt to new image distributions and tasks without prior knowledge. Key Features: - Promptable segmentation - Real-time performance - Zero-shot transfer capabilities - Trained on SA-1B dataset """ class SAM(Model): """ SAM (Segment Anything Model) interface class. SAM is designed for promptable real-time image segmentation. It can be used with a variety of prompts such as bounding boxes, points, or labels. The model has capabilities for zero-shot performance and is trained on the SA-1B dataset. """ def __init__(self, model='sam_b.pt') -> None: """ Initializes the SAM model with a pre-trained model file. Args: model (str): Path to the pre-trained SAM model file. File should have a .pt or .pth extension. Raises: NotImplementedError: If the model file extension is not .pt or .pth. """ if model and Path(model).suffix not in ('.pt', '.pth'): raise NotImplementedError('SAM prediction requires pre-trained *.pt or *.pth model.') super().__init__(model=model, task='segment') def _load(self, weights: str, task=None): """ Loads the specified weights into the SAM model. Args: weights (str): Path to the weights file. task (str, optional): Task name. Defaults to None. """ self.model = build_sam(weights) def predict(self, source, stream=False, bboxes=None, points=None, labels=None, **kwargs): """ Performs segmentation prediction on the given image or video source. Args: source (str): Path to the image or video file, or a PIL.Image object, or a numpy.ndarray object. stream (bool, optional): If True, enables real-time streaming. Defaults to False. bboxes (list, optional): List of bounding box coordinates for prompted segmentation. Defaults to None. points (list, optional): List of points for prompted segmentation. Defaults to None. labels (list, optional): List of labels for prompted segmentation. Defaults to None. Returns: (list): The model predictions. """ overrides = dict(conf=0.25, task='segment', mode='predict', imgsz=1024) kwargs.update(overrides) prompts = dict(bboxes=bboxes, points=points, labels=labels) return super().predict(source, stream, prompts=prompts, **kwargs) def __call__(self, source=None, stream=False, bboxes=None, points=None, labels=None, **kwargs): """ Alias for the 'predict' method. Args: source (str): Path to the image or video file, or a PIL.Image object, or a numpy.ndarray object. stream (bool, optional): If True, enables real-time streaming. Defaults to False. bboxes (list, optional): List of bounding box coordinates for prompted segmentation. Defaults to None. points (list, optional): List of points for prompted segmentation. Defaults to None. labels (list, optional): List of labels for prompted segmentation. Defaults to None. Returns: (list): The model predictions. """ return self.predict(source, stream, bboxes, points, labels, **kwargs) def info(self, detailed=False, verbose=True): """ Logs information about the SAM model. Args: detailed (bool, optional): If True, displays detailed information about the model. Defaults to False. verbose (bool, optional): If True, displays information on the console. Defaults to True. Returns: (tuple): A tuple containing the model's information. 
"""
# Ultralytics YOLO 🚀, AGPL-3.0 license """ SAM model interface. This module provides an interface to the Segment Anything Model (SAM) from Ultralytics, designed for real-time image segmentation tasks. The SAM model allows for promptable segmentation with unparalleled versatility in image analysis, and has been trained on the SA-1B dataset. It features zero-shot performance capabilities, enabling it to adapt to new image distributions and tasks without prior knowledge. Key Features: - Promptable segmentation - Real-time performance - Zero-shot transfer capabilities - Trained on SA-1B dataset """ class SAM(Model): """ SAM (Segment Anything Model) interface class. SAM is designed for promptable real-time image segmentation. It can be used with a variety of prompts such as bounding boxes, points, or labels. The model has capabilities for zero-shot performance and is trained on the SA-1B dataset. """ def __init__(self, model='sam_b.pt') -> None: """ Initializes the SAM model with a pre-trained model file. Args: model (str): Path to the pre-trained SAM model file. File should have a .pt or .pth extension. Raises: NotImplementedError: If the model file extension is not .pt or .pth. """ if model and Path(model).suffix not in ('.pt', '.pth'): raise NotImplementedError('SAM prediction requires pre-trained *.pt or *.pth model.') super().__init__(model=model, task='segment') def _load(self, weights: str, task=None): """ Loads the specified weights into the SAM model. Args: weights (str): Path to the weights file. task (str, optional): Task name. Defaults to None. """ self.model = build_sam(weights) def predict(self, source, stream=False, bboxes=None, points=None, labels=None, **kwargs): """ Performs segmentation prediction on the given image or video source. Args: source (str): Path to the image or video file, or a PIL.Image object, or a numpy.ndarray object. stream (bool, optional): If True, enables real-time streaming. Defaults to False. bboxes (list, optional): List of bounding box coordinates for prompted segmentation. Defaults to None. points (list, optional): List of points for prompted segmentation. Defaults to None. labels (list, optional): List of labels for prompted segmentation. Defaults to None. Returns: (list): The model predictions. """ overrides = dict(conf=0.25, task='segment', mode='predict', imgsz=1024) kwargs.update(overrides) prompts = dict(bboxes=bboxes, points=points, labels=labels) return super().predict(source, stream, prompts=prompts, **kwargs) def __call__(self, source=None, stream=False, bboxes=None, points=None, labels=None, **kwargs): """ Alias for the 'predict' method. Args: source (str): Path to the image or video file, or a PIL.Image object, or a numpy.ndarray object. stream (bool, optional): If True, enables real-time streaming. Defaults to False. bboxes (list, optional): List of bounding box coordinates for prompted segmentation. Defaults to None. points (list, optional): List of points for prompted segmentation. Defaults to None. labels (list, optional): List of labels for prompted segmentation. Defaults to None. Returns: (list): The model predictions. """ return self.predict(source, stream, bboxes, points, labels, **kwargs) def info(self, detailed=False, verbose=True): """ Logs information about the SAM model. Args: detailed (bool, optional): If True, displays detailed information about the model. Defaults to False. verbose (bool, optional): If True, displays information on the console. Defaults to True. Returns: (tuple): A tuple containing the model's information. 
"""
return model_info(self.model, detailed=detailed, verbose=verbose)
1
2023-11-16 12:49:59+00:00
16k
Aues6uen11Z/Zafkiel
tests/test.py
[ { "identifier": "logger", "path": "zafkiel/logger.py", "snippet": "" }, { "identifier": "Config", "path": "zafkiel/config.py", "snippet": "class Config:\n ST = Settings\n ST.CVSTRATEGY = [\"mstpl\", \"sift\"]\n ST.THRESHOLD = 0.8\n\n GAME_PATH = None\n SERVER_LANG = 'cn'\n\n # Top, left and bottom boundary pixel values when running in a bordered program\n # The value on my Win10 computer, may not accurate for everyone.\n BORDER = (32, 3, 2)" }, { "identifier": "API", "path": "zafkiel/device/api.py", "snippet": "class API:\n \"\"\"\n Device Setup APIs\n \"\"\"\n\n @staticmethod\n def init_device(platform=\"Android\", uuid=None, **kwargs):\n return init_device(platform, uuid, **kwargs)\n\n @staticmethod\n def connect_device(uri):\n return connect_device(uri)\n\n @staticmethod\n def device():\n return device()\n\n @staticmethod\n def set_current(idx):\n set_current(idx)\n\n @staticmethod\n def auto_setup(\n basedir: str = None,\n devices: list = None,\n firing_time: int = 30,\n logdir: bool = None,\n project_root: str = None,\n compress: int = None\n ):\n \"\"\"\n Auto setup running env and try to connect device if no device is connected.\n\n Args:\n basedir: basedir of script, __file__ is also acceptable.\n devices: connect_device uri in list.\n firing_time: Game starts taking time, this value should be set larger in old machine.\n logdir: log dir for script report, default is None for no log, set to ``True`` for ``<basedir>/log``.\n project_root: Project root dir for `using` api.\n compress: The compression rate of the screenshot image, integer in range [1, 99], default is 10\n\n Examples:\n auto_setup(__file__)\n auto_setup(__file__, devices=[\"Android://127.0.0.1:5037/SJE5T17B17\"],\n ... logdir=True, project_root=r\"D:\\\\test\\\\logs\", compress=90)\n \"\"\"\n if basedir:\n if os.path.isfile(basedir):\n basedir = os.path.dirname(basedir)\n if basedir not in G.BASEDIR:\n G.BASEDIR.append(basedir)\n if devices:\n startup_time = Timer(firing_time).start()\n for dev in devices:\n while not startup_time.reached():\n try:\n connect_device(dev)\n break\n except ElementNotFoundError:\n time.sleep(3)\n if startup_time.reached():\n raise NotRunningError(dev)\n if logdir:\n logdir = script_log_dir(basedir, logdir)\n set_logdir(logdir)\n if project_root:\n ST.PROJECT_ROOT = project_root\n if compress:\n ST.SNAPSHOT_QUALITY = compress\n\n \"\"\"\n Device Operations\n \"\"\"\n\n @staticmethod\n def app_is_running() -> bool:\n \"\"\"\n Platforms:\n Windows\n\n Returns:\n Whether app is running\n \"\"\"\n return G.DEVICE.app_is_running()\n\n @staticmethod\n def stop_app(package=None):\n \"\"\"\n Stop the target application on device\n\n Return:\n Has the Windows application stopped, on Android and iOS no return.\n\n Platforms:\n Android, iOS, Windows\n\n Example:\n stop_app(\"com.netease.cloudmusic\")\n stop_app() # only test on Windows\n \"\"\"\n return G.DEVICE.stop_app(package)\n\n @staticmethod\n @logwrap\n def touch(\n v: Template or tuple,\n times: int = 1,\n blind: bool = False,\n interval: float = 0.05,\n ocr_mode: int = 0,\n cls: Type[Ocr] = Ocr,\n **kwargs\n ) -> tuple:\n \"\"\"\n Perform the touch action on the device screen\n\n Args:\n v: Target to touch, either a ``ImageTemplate`` instance or absolute coordinates (x, y).\n times: How many touches to be performed\n blind: Whether to recognize Template, sometimes we only need to click without caring about the image.\n interval: Time interval between two touches.\n ocr_mode: Ocr match rules, one of `OCR_EQUAL`, `OCR_CONTAINS`, 
`OCR_SIMILAR`.\n cls: \"Ocr\" class or its subclass\n **kwargs: Platform specific `kwargs`, please refer to corresponding docs.\n\n Returns:\n Final position to be clicked, e.g. (100, 100)\n\n Platforms:\n Android, Windows, iOS\n\n Examples:\n Click absolute coordinates:\n touch((100, 100))\n Click 2 times:\n touch((100, 100), times=2)\n Under Android and Windows platforms, you can set the click duration:\n touch((100, 100), duration=2)\n Right click(Windows):\n touch((100, 100), right_click=True)\n \"\"\"\n if isinstance(v, Template):\n if blind:\n center_pos = (v.area[2] + v.area[0]) / 2, (v.area[3] + v.area[1]) / 2\n else:\n center_pos = loop_find(v, timeout=ST.FIND_TIMEOUT, cls=cls, ocr_mode=ocr_mode)\n\n h = v.height * v.ratio()\n w = v.width * v.ratio() # actual height and width of target in screen\n pos = random_rectangle_point(center_pos, h, w)\n else:\n try_log_screen()\n pos = v\n for _ in range(times):\n G.DEVICE.touch(pos, **kwargs)\n time.sleep(interval)\n delay_after_operation()\n return pos\n\n @logwrap\n def find_click(\n self,\n rec_template: Template,\n touch_template: Template = None,\n times: int = 1,\n timeout: float = 1,\n blind: bool = False,\n ocr_mode: int = 0,\n cls: Type[Ocr] = Ocr\n ) -> bool:\n \"\"\"\n Find the template image and click it or another image area.\n\n Args:\n rec_template: \"Template\" instance to be found.\n touch_template: \"ImageTemplate\" instance to be clicked, defaults to None which means click rec_template.\n times: How many touches to be performed.\n timeout: Time interval to wait for the match.\n blind: Whether to recognize Template, same as parameter of touch().\n ocr_mode: Ocr match rules, one of `OCR_EQUAL`, `OCR_CONTAINS`, `OCR_SIMILAR`.\n cls: \"Ocr\" class or its subclass\n\n Returns:\n bool: Whether the target image appear and click it.\n \"\"\"\n try:\n pos = self.wait(rec_template, timeout=timeout, ocr_mode=ocr_mode, cls=cls)\n h = rec_template.height * rec_template.ratio()\n w = rec_template.width * rec_template.ratio() # actual height and width of target in screen\n pos = random_rectangle_point(pos, h, w)\n except TargetNotFoundError:\n return False\n\n if touch_template:\n self.touch(touch_template, times, blind, ocr_mode=ocr_mode, cls=cls)\n logger.info((f\"Click{pos} {times} times\" if times > 1 else f\"Click{pos}\") + f\" @{touch_template.name}\")\n else:\n self.touch(pos, times)\n logger.info((f\"Click{pos} {times} times\" if times > 1 else f\"Click{pos}\") + f\" @{rec_template.name}\")\n return True\n\n @staticmethod\n @logwrap\n def exists(v: Template, timeout: float = 0, ocr_mode: int = 0, cls: Type[Ocr] = Ocr) -> bool or tuple:\n \"\"\"\n Check whether given target exists on device screen\n\n Args:\n v: target to be checked\n timeout: time limit, default is 0 which means loop_find will only search once\n ocr_mode: Ocr match rules, one of `OCR_EQUAL`, `OCR_CONTAINS`, `OCR_SIMILAR`.\n cls: \"Ocr\" class or its subclass\n\n Returns:\n False if target is not found, otherwise returns the coordinates of the target\n\n Platforms:\n Android, Windows, iOS\n\n Examples:\n if exists(ImageTemplate(r\"tpl1606822430589.png\")):\n touch(ImageTemplate(r\"tpl1606822430589.png\"))\n\n Since ``exists()`` will return the coordinates,\n we can directly click on this return value to reduce one image search:\n\n pos = exists(ImageTemplate(r\"tpl1606822430589.png\"))\n if pos:\n touch(pos)\n \"\"\"\n try:\n pos = loop_find(v, timeout=timeout, ocr_mode=ocr_mode, cls=cls)\n except TargetNotFoundError:\n return False\n else:\n return pos\n\n 
@staticmethod\n @logwrap\n def wait(\n v: Template,\n timeout: float = None,\n interval: float = 0.5,\n interval_func: Callable = None,\n ocr_mode: int = 0,\n cls: Type[Ocr] = Ocr\n ) -> tuple:\n \"\"\"\n Wait to match the Template on the device screen\n\n Args:\n v: target object to wait for, Template instance\n timeout: time interval to wait for the match, default is None which is ``ST.FIND_TIMEOUT``\n interval: time interval in seconds to attempt to find a match\n interval_func: called after each unsuccessful attempt to find the corresponding match\n ocr_mode: Ocr match rules, one of `OCR_EQUAL`, `OCR_CONTAINS`, `OCR_SIMILAR`.\n cls: \"Ocr\" class or its subclass\n\n Raises:\n TargetNotFoundError: raised if target is not found after the time limit expired\n\n Returns:\n coordinates of the matched target\n\n Platforms:\n Android, Windows, iOS\n\n Examples:\n wait(Template(r\"tpl1606821804906.png\")) # timeout after ST.FIND_TIMEOUT\n # find Template every 3 seconds, timeout after 120 seconds\n wait(Template(r\"tpl1606821804906.png\"), timeout=120, interval=3)\n\n You can specify a callback function every time the search target fails::\n\n def notfound():\n print(\"No target found\")\n wait(Template(r\"tpl1607510661400.png\"), interval_func=notfound)\n \"\"\"\n if timeout is None:\n timeout = ST.FIND_TIMEOUT\n pos = loop_find(v, timeout=timeout, interval=interval, interval_func=interval_func, ocr_mode=ocr_mode, cls=cls)\n\n return pos\n\n @staticmethod\n def swipe(\n v1: Template or tuple,\n v2: Template or tuple = None,\n vector: tuple = None,\n blind1: bool = False,\n blind2: bool = False,\n **kwargs\n ) -> tuple:\n \"\"\"\n Perform the swipe action on the device screen.\n\n There are two ways of assigning the parameters\n * ``swipe(v1, v2=Template(...))`` # swipe from v1 to v2\n * ``swipe(v1, vector=(x, y))`` # swipe starts at v1 and moves along the vector.\n\n Args:\n v1: the start point of swipe, either a Template instance or absolute coordinates (x, y)\n v2: the end point of swipe, either a Template instance or absolute coordinates (x, y)\n vector: a vector coordinates of swipe action, either absolute coordinates (x, y) or percentage of\n screen e.g.(0.5, 0.5)\n blind1: Whether to recognize Template1, same as parameter of touch().\n blind2: Whether to recognize Template2, same as parameter of touch().\n **kwargs: platform specific `kwargs`, please refer to corresponding docs\n\n Raises:\n general exception when not enough parameters to perform swap action have been provided\n\n Returns:\n Origin position and target position\n\n Platforms:\n Android, Windows, iOS\n\n Examples:\n swipe(Template(r\"tpl1606814865574.png\"), vector=[-0.0316, -0.3311])\n swipe((100, 100), (200, 200))\n\n Custom swiping duration and number of steps(Android and iOS)::\n\n # swiping lasts for 1 second, divided into 6 steps\n swipe((100, 100), (200, 200), duration=1, steps=6)\n \"\"\"\n if isinstance(v1, Template):\n if blind1:\n pos1 = (v1.area[2] + v1.area[0]) / 2, (v1.area[3] + v1.area[1]) / 2\n else:\n pos1 = loop_find(v1, timeout=ST.FIND_TIMEOUT)\n else:\n try_log_screen()\n pos1 = v1\n\n if v2:\n if isinstance(v2, Template):\n if blind2:\n pos2 = (v2.area[2] + v2.area[0]) / 2, (v2.area[3] + v2.area[1]) / 2\n else:\n pos2 = loop_find(v2, timeout=ST.FIND_TIMEOUT_TMP)\n else:\n pos2 = v2\n elif vector:\n if vector[0] <= 1 and vector[1] <= 1:\n w, h = G.DEVICE.get_current_resolution()\n vector = (int(vector[0] * w), int(vector[1] * h))\n pos2 = (pos1[0] + vector[0], pos1[1] + vector[1])\n else:\n raise 
ScriptError(\"no enough params for swipe\")\n\n G.DEVICE.swipe(pos1, pos2, **kwargs)\n delay_after_operation()\n logger.info(f\"Swipe {pos1} -> {pos2}\")\n return pos1, pos2\n\n @staticmethod\n def screenshot():\n \"\"\"\n Returns:\n Screenshot image\n \"\"\"\n return G.DEVICE.snapshot(filename=None, quality=ST.SNAPSHOT_QUALITY)\n\n @staticmethod\n def snapshot(filename=None, msg=\"\", quality=None, max_size=None):\n \"\"\"\n Returns:\n {\"screen\": filename, \"resolution\": resolution of the screen} or None\n \"\"\"\n return snapshot(filename, msg, quality, max_size)\n\n @staticmethod\n def shell(cmd):\n return shell(cmd)\n\n @staticmethod\n def start_app(package, activity=None):\n start_app(package, activity)\n\n @staticmethod\n def clear_app(package):\n clear_app(package)\n\n @staticmethod\n def install(filepath, **kwargs):\n return install(filepath, **kwargs)\n\n @staticmethod\n def uninstall(package):\n return uninstall(package)\n\n @staticmethod\n def wake():\n wake()\n\n @staticmethod\n def home():\n home()\n\n @staticmethod\n def double_click(v):\n return double_click(v)\n\n @staticmethod\n def pinch(in_or_out='in', center=None, percent=0.5):\n pinch(in_or_out, center, percent)\n\n @staticmethod\n def key_event(keyname, **kwargs):\n keyevent(keyname, **kwargs)\n\n @staticmethod\n def text(txt, enter=True, **kwargs):\n text(txt, enter, **kwargs)\n\n @staticmethod\n def sleep(secs=1.0):\n sleep(secs)\n\n @staticmethod\n def find_all(v):\n return find_all(v)\n\n @staticmethod\n def get_clipboard(*args, **kwargs):\n return get_clipboard(*args, **kwargs)\n\n @staticmethod\n def set_clipboard(content, *args, **kwargs):\n set_clipboard(content, *args, **kwargs)" }, { "identifier": "Template", "path": "zafkiel/device/template.py", "snippet": "class ImageTemplate(Template):\n def __init__(\n self,\n filename: str,\n record_pos: tuple = None,\n keyword: Keyword = None,\n threshold: float = None,\n target_pos: int = TargetPos.MID,\n resolution: tuple = (1280, 720),\n rgb: bool = False,\n scale_max: int = 800,\n scale_step: float = 0.005,\n template_path: str = 'templates'\n ):\n def filepath(self) -> str:\n def name(self) -> str:\n def image(self) -> ndarray:\n def height(self) -> int:\n def width(self) -> int:\n def _has_border(self) -> bool:\n def ratio(self, screen_height: float = None) -> float:\n def area(self) -> tuple:" }, { "identifier": "Timer", "path": "zafkiel/timer.py", "snippet": "class Timer:\n def __init__(self, limit, count=0):\n \"\"\"\n From https://github.com/LmeSzinc/StarRailCopilot/blob/master/module/base/timer.py\n\n Args:\n limit (int, float): Timer limit\n count (int): Timer reach confirm count. 
Default to 0.\n When using a structure like this, must set a count.\n Otherwise, it goes wrong if screenshot time cost greater than limit.\n\n if self.appear(MAIN_CHECK):\n if confirm_timer.reached():\n pass\n else:\n confirm_timer.reset()\n\n Also, It's a good idea to set `count`, to make program run more stable on slow computers.\n Expected speed is 0.35 second / screenshot.\n \"\"\"\n self.limit = limit\n self.count = count\n self._current = 0\n self._reach_count = count\n\n def start(self):\n if not self.started():\n self._current = time.time()\n self._reach_count = 0\n\n return self\n\n def started(self):\n return bool(self._current)\n\n def current(self):\n \"\"\"\n Returns:\n float\n \"\"\"\n if self.started():\n return time.time() - self._current\n else:\n return 0.\n\n def set_current(self, current, count=0):\n self._current = time.time() - current\n self._reach_count = count\n\n def reached(self):\n \"\"\"\n Returns:\n bool\n \"\"\"\n self._reach_count += 1\n return time.time() - self._current > self.limit and self._reach_count > self.count\n\n def reset(self):\n self._current = time.time()\n self._reach_count = 0\n return self\n\n def clear(self):\n self._current = 0\n self._reach_count = self.count\n return self\n\n def reached_and_reset(self):\n \"\"\"\n Returns:\n bool:\n \"\"\"\n if self.reached():\n self.reset()\n return True\n else:\n return False\n\n def wait(self):\n \"\"\"\n Wait until timer reached.\n \"\"\"\n diff = self._current + self.limit - time.time()\n if diff > 0:\n time.sleep(diff)\n\n def show(self):\n logger.info(str(self))\n\n def __str__(self):\n return f'Timer(limit={round(self.current(), 3)}/{self.limit}, count={self._reach_count}/{self.count})'\n\n __repr__ = __str__" }, { "identifier": "simple_report", "path": "zafkiel/report.py", "snippet": "def simple_report(filepath, log_path=True, logfile=None, output=HTML_FILE):\n path, name = script_dir_name(filepath)\n if log_path is True:\n log_path = os.path.join(path, getattr(Config, \"LOG_DIR\", DEFAULT_LOG_DIR))\n rpt = HtmlReport(path, log_path, logfile=logfile or getattr(Config, \"LOG_FILE\", DEFAULT_LOG_FILE), script_name=name)\n rpt.report(HTML_TPL, output_file=output)" }, { "identifier": "Keyword", "path": "zafkiel/ocr/keyword.py", "snippet": "class Keyword:\n cn: str = ''\n cht: str = ''\n en: str = ''\n jp: str = ''\n # id: int # To be considered\n name: str = ''\n\n \"\"\"\n Instance attributes and methods\n TODO: Error handling for missing attributes\n \"\"\"\n\n @cached_property\n def ch(self) -> str:\n return self.cn\n\n @cached_property\n def cn_parsed(self) -> str:\n return parse_name(self.cn)\n\n @cached_property\n def en_parsed(self) -> str:\n return parse_name(self.en)\n\n @cached_property\n def jp_parsed(self) -> str:\n return parse_name(self.jp)\n\n @cached_property\n def cht_parsed(self) -> str:\n return parse_name(self.cht)\n\n def __str__(self):\n keyword_list = []\n for keyword in [self.cn, self.cht, self.en, self.jp]:\n if keyword != '':\n keyword_list.append(keyword)\n return f\"{self.__class__.__name__}({self.name})->{'/'.join(keyword_list)}\"\n\n __repr__ = __str__\n\n def __eq__(self, other):\n return str(self) == str(other)\n\n def __hash__(self):\n return hash(self.name)\n\n def __bool__(self):\n return True\n\n def keywords_to_find(self, lang: str = None, ignore_punctuation: bool = True):\n if lang is None:\n lang = Config.SERVER_LANG\n\n # TODO: fix this refer to SRC\n if lang == 'cn':\n if ignore_punctuation:\n return [self.cn_parsed]\n else:\n return [self.cn]\n elif lang == 
'en':\n if ignore_punctuation:\n return [self.en_parsed]\n else:\n return [self.en]\n elif lang == 'jp':\n if ignore_punctuation:\n return [self.jp_parsed]\n else:\n return [self.jp]\n elif lang == 'cht':\n if ignore_punctuation:\n return [self.cht_parsed]\n else:\n return [self.cht]\n else:\n if ignore_punctuation:\n return [\n self.cn_parsed,\n self.en_parsed,\n self.jp_parsed,\n self.cht_parsed,\n ]\n else:\n return [\n self.cn,\n self.en,\n self.jp,\n self.cht,\n ]\n\n \"\"\"\n Class attributes and methods\n\n Note that dataclasses inherited `Keyword` must override `instances` attribute,\n or `instances` will still be a class attribute of base class.\n ```\n @dataclass\n class DungeonNav(Keyword):\n instances: ClassVar = {}\n ```\n \"\"\"\n # Key: instance name. Value: instance object.\n instances: ClassVar = {}\n\n def __post_init__(self):\n self.__class__.instances[self.name] = self\n\n @classmethod\n def _compare(cls, name, keyword):\n return name == keyword\n\n @classmethod\n def find(cls, name, lang: str = None, ignore_punctuation: bool = True):\n \"\"\"\n Args:\n name: Name in any server or instance id.\n lang: Lang to find from. None to search the names from current server only.\n ignore_punctuation: True to remove punctuations and turn into lowercase before searching.\n\n Returns:\n Keyword instance.\n\n Raises:\n ScriptError: If nothing found.\n \"\"\"\n # Already a keyword\n if isinstance(name, Keyword):\n return name\n\n # Probably a variable name\n if isinstance(name, str) and '_' in name:\n for instance in cls.instances.values():\n if name == instance.name:\n return instance\n # Probably an in-game name\n if ignore_punctuation:\n name = parse_name(name)\n else:\n name = str(name)\n instance: Keyword\n for instance in cls.instances.values():\n for keyword in instance.keywords_to_find(\n lang=lang, ignore_punctuation=ignore_punctuation):\n if cls._compare(name, keyword):\n return instance\n\n # Not found\n raise ScriptError(f'Cannot find a {cls.__name__} instance that matches \"{name}\"')" }, { "identifier": "Ocr", "path": "zafkiel/ocr/ocr.py", "snippet": "class Ocr:\n # Merge results with box distance <= thres\n merge_thres_x = 0\n merge_thres_y = 0\n\n def __init__(self, button: ImageTemplate, lang=None, name=None):\n \"\"\"\n Args:\n button:\n lang: If None, use in-game language\n name: If None, use button.name\n \"\"\"\n if lang is None:\n lang = Config.SERVER_LANG\n if name is None:\n name = button.name\n\n self.button: ImageTemplate = button\n self.lang: str = lang\n self.name: str = name\n\n @cached_property\n def model(self) -> TextSystem:\n return OCR_MODEL.get_by_lang(self.lang)\n\n @staticmethod\n def pre_process(image):\n \"\"\"\n To be overridden.\n \"\"\"\n return image\n\n @staticmethod\n def after_process(result):\n \"\"\"\n To be overridden.\n \"\"\"\n return result\n\n def format_result(self, result) -> str:\n \"\"\"\n To be overridden.\n \"\"\"\n return result\n\n def ocr_single_line(self, image):\n # pre process\n start_time = time.time()\n image = crop(image, self.button.area)\n image = self.pre_process(image)\n # ocr\n result, _ = self.model.ocr_single_line(image)\n # after proces\n result = self.after_process(result)\n result = self.format_result(result)\n\n cost_time = time.time() - start_time\n logger.debug(f'OCR <{self.name}> cost {cost_time:.2f}s: {result}')\n return result\n\n def filter_detected(self, result: BoxedResult) -> bool:\n \"\"\"\n Return False to drop result.\n To be overridden.\n \"\"\"\n return True\n\n def detect_and_ocr(self, 
image, direct_ocr=False) -> list[BoxedResult]:\n \"\"\"\n Args:\n image:\n direct_ocr: True to ignore `button` attribute and feed the image to OCR model without cropping.\n\n Returns:\n\n \"\"\"\n # pre process\n start_time = time.time()\n if not direct_ocr:\n image = crop(image, self.button.area)\n image = self.pre_process(image)\n # ocr\n results: list[BoxedResult] = self.model.detect_and_ocr(image)\n # after proces\n for result in results:\n if not direct_ocr:\n result.box += self.button.area[:2]\n result.box = tuple(corner2area(result.box))\n\n results = [result for result in results if self.filter_detected(result)]\n results = merge_buttons(results, thres_x=self.merge_thres_x, thres_y=self.merge_thres_y)\n for result in results:\n result.ocr_text = self.after_process(result.ocr_text)\n\n cost_time = time.time() - start_time\n logger.debug(f\"OCR <{self.name}> cost {cost_time:.2f}s: {', '.join([result.ocr_text for result in results])}\")\n return results\n\n @staticmethod\n def _match_result(\n result: str,\n keyword_classes,\n lang: str = None,\n ignore_punctuation=True,\n ignore_digit=True):\n \"\"\"\n Args:\n result (str):\n keyword_classes: A list of `Keyword` class or classes inherited `Keyword`\n\n Returns:\n If matched, return `Keyword` object or objects inherited `Keyword`\n If not match, return None\n \"\"\"\n if not isinstance(keyword_classes, list):\n keyword_classes = [keyword_classes]\n\n # Digits will be considered as the index of keyword\n if ignore_digit:\n if result.isdigit():\n return None\n\n # Try in current lang\n for keyword_class in keyword_classes:\n try:\n matched = keyword_class.find(\n result,\n lang=lang,\n ignore_punctuation=ignore_punctuation\n )\n return matched\n except ScriptError:\n continue\n\n return None\n\n def matched_single_line(\n self,\n image,\n keyword_classes,\n lang: str = None,\n ignore_punctuation=True\n ):\n \"\"\"\n Args:\n image: Image to detect\n keyword_classes: `Keyword` class or classes inherited `Keyword`, or a list of them.\n lang:\n ignore_punctuation:\n\n Returns:\n If matched, return `Keyword` object or objects inherited `Keyword`\n If not match, return None\n \"\"\"\n result = self.ocr_single_line(image)\n\n result = self._match_result(\n result,\n keyword_classes=keyword_classes,\n lang=lang,\n ignore_punctuation=ignore_punctuation,\n )\n\n logger.debug(f'<{self.name}> matched: {str(result)}')\n return result\n\n def _product_button(\n self,\n boxed_result: BoxedResult,\n keyword_classes,\n lang: str = None,\n ignore_punctuation=True,\n ignore_digit=True\n ) -> OcrResultButton:\n if not isinstance(keyword_classes, list):\n keyword_classes = [keyword_classes]\n\n matched_keyword = self._match_result(\n boxed_result.ocr_text,\n keyword_classes=keyword_classes,\n lang=lang,\n ignore_punctuation=ignore_punctuation,\n ignore_digit=ignore_digit,\n )\n button = OcrResultButton(boxed_result, matched_keyword)\n return button\n\n def matched_ocr(self, image, keyword_classes, direct_ocr=False) -> list[OcrResultButton]:\n \"\"\"\n Match all instances of 'keyword_classes' on the screen.\n\n Args:\n image: Screenshot\n keyword_classes: `Keyword` class or classes inherited `Keyword`, or a list of them.\n direct_ocr: True to ignore `button` attribute and feed the image to OCR model without cropping.\n\n Returns:\n List of matched OcrResultButton.\n OCR result which didn't matched known keywords will be dropped.\n \"\"\"\n results = self.detect_and_ocr(image, direct_ocr=direct_ocr)\n results = [self._product_button(result, keyword_classes) 
for result in results]\n results = [result for result in results if result.is_keyword_matched]\n\n if results:\n logger.debug(f\"<{self.name}> matched: {', '.join([str(result) for result in results])}\")\n # else:\n # logger.debug(f\"<{self.name}> matching failed\")\n return results\n\n def ocr_match_keyword(self, image, keyword_instance, direct_ocr=False, mode: int = OCR_EQUAL, threshold=0.75) \\\n -> list[OcrResultButton]:\n \"\"\"\n Match a specified keyword instance on the screen.\n\n Args:\n image: Screenshot\n keyword_instance: Instance of `Keyword` class or its subclass.\n direct_ocr: True to ignore `button` attribute and feed the image to OCR model without cropping.\n mode: Match rules, one of `OCR_EQUAL`, `OCR_CONTAINS`, `OCR_SIMILAR`.\n threshold: Similarity threshold, default 0.75, only work when mode is OCR_SIMILAR.\n\n Returns:\n List of matched OcrResultButton or empty list.\n \"\"\"\n boxed_results = self.detect_and_ocr(image, direct_ocr=direct_ocr)\n final_results = []\n for boxed_result in boxed_results:\n for keyword in keyword_instance.keywords_to_find():\n if mode == OCR_EQUAL and boxed_result.ocr_text != keyword:\n continue\n elif mode == OCR_CONTAINS and keyword not in boxed_result.ocr_text:\n continue\n elif mode == OCR_SIMILAR:\n similarity = SequenceMatcher(None, boxed_result.ocr_text, keyword).ratio()\n if similarity < threshold:\n continue\n button = OcrResultButton(boxed_result, keyword_instance)\n final_results.append(button)\n\n if final_results:\n logger.debug(f\"<{self.name}> matched: {', '.join([str(result) for result in final_results])}\")\n # else:\n # logger.debug(f\"<{self.name}> matching failed\")\n return final_results" }, { "identifier": "Digit", "path": "zafkiel/ocr/ocr.py", "snippet": "class Digit(Ocr):\n def __init__(self, button: ImageTemplate, lang='en', name=None):\n super().__init__(button, lang=lang, name=name)\n\n def format_result(self, result) -> int:\n \"\"\"\n Returns:\n int:\n \"\"\"\n result = super().after_process(result)\n # logger.attr(name=self.name, text=str(result))\n\n res = re.search(r'(\\d+)', result)\n if res:\n return int(res.group(1))\n else:\n # logger.warning(f'No digit found in {result}')\n return 0" }, { "identifier": "DigitCounter", "path": "zafkiel/ocr/ocr.py", "snippet": "class DigitCounter(Ocr):\n def __init__(self, button: ImageTemplate, lang='en', name=None):\n super().__init__(button, lang=lang, name=name)\n\n def format_result(self, result) -> tuple[int, int, int]:\n \"\"\"\n Do OCR on a counter, such as `14/15`, and returns 14, 1, 15\n\n Returns:\n int:\n \"\"\"\n result = super().after_process(result)\n # logger.attr(name=self.name, text=str(result))\n\n res = re.search(r'(\\d+)/(\\d+)', result)\n if res:\n groups = [int(s) for s in res.groups()]\n current, total = int(groups[0]), int(groups[1])\n # current = min(current, total)\n return current, total - current, total\n else:\n # logger.warning(f'No digit counter found in {result}')\n return 0, 0, 0" }, { "identifier": "Duration", "path": "zafkiel/ocr/ocr.py", "snippet": "class Duration(Ocr):\n @classmethod\n def timedelta_regex(cls, lang):\n regex_str = {\n 'cn': r'^(?P<prefix>.*?)'\n r'((?P<days>\\d{1,2})\\s*天\\s*)?'\n r'((?P<hours>\\d{1,2})\\s*小时\\s*)?'\n r'((?P<minutes>\\d{1,2})\\s*分钟\\s*)?'\n r'((?P<seconds>\\d{1,2})\\s*秒)?'\n r'(?P<suffix>[^天时钟秒]*?)$',\n 'en': r'^(?P<prefix>.*?)'\n r'((?P<days>\\d{1,2})\\s*d\\s*)?'\n r'((?P<hours>\\d{1,2})\\s*h\\s*)?'\n r'((?P<minutes>\\d{1,2})\\s*m\\s*)?'\n r'((?P<seconds>\\d{1,2})\\s*s)?'\n r'(?P<suffix>[^dhms]*?)$'\n 
}[lang]\n return re.compile(regex_str)\n\n def after_process(self, result):\n result = super().after_process(result)\n result = result.strip('.,。,')\n result = result.replace('Oh', '0h').replace('oh', '0h')\n return result\n\n def format_result(self, result: str) -> timedelta:\n \"\"\"\n Do OCR on a duration, such as `18d 2h 13m 30s`, `2h`, `13m 30s`, `9s`\n\n Returns:\n timedelta:\n \"\"\"\n matched = self.timedelta_regex(self.lang).search(result)\n if not matched:\n return timedelta()\n days = self._sanitize_number(matched.group('days'))\n hours = self._sanitize_number(matched.group('hours'))\n minutes = self._sanitize_number(matched.group('minutes'))\n seconds = self._sanitize_number(matched.group('seconds'))\n return timedelta(days=days, hours=hours, minutes=minutes, seconds=seconds)\n\n @staticmethod\n def _sanitize_number(number) -> int:\n if number is None:\n return 0\n return int(number)" }, { "identifier": "OcrResultButton", "path": "zafkiel/ocr/ocr.py", "snippet": "class OcrResultButton:\n def __init__(self, boxed_result: BoxedResult, matched_keyword: Optional[Keyword]):\n \"\"\"\n Args:\n boxed_result: BoxedResult from ppocr-onnx\n matched_keyword: Keyword object or None\n \"\"\"\n self.area = boxed_result.box\n self.search = area_pad(self.area, pad=-20)\n # self.button = boxed_result.box\n\n if matched_keyword is not None:\n self.matched_keyword = matched_keyword\n self.name = str(matched_keyword)\n else:\n self.matched_keyword = None\n self.name = boxed_result.ocr_text\n\n self.text = boxed_result.ocr_text\n self.score = boxed_result.score\n\n @property\n def is_keyword_matched(self) -> bool:\n return self.matched_keyword is not None\n\n def __str__(self):\n return self.name\n\n __repr__ = __str__\n\n def __eq__(self, other):\n return str(self) == str(other)\n\n def __hash__(self):\n return hash(self.name)\n\n def __bool__(self):\n return True" }, { "identifier": "Page", "path": "zafkiel/ui/page.py", "snippet": "class Page:\n \"\"\"\n Main code comes from https://github.com/LmeSzinc/StarRailCopilot/blob/master/tasks/base/page.py\n \"\"\"\n\n # Key: str, page name like \"page_main\"\n # Value: Page, page instance\n all_pages = {}\n\n @classmethod\n def clear_connection(cls):\n for page in cls.all_pages.values():\n page.parent = None\n\n @classmethod\n def init_connection(cls, destination: Page):\n \"\"\"Initialize an A* path finding among pages.\n\n Args:\n destination:\n \"\"\"\n cls.clear_connection()\n\n visited = [destination]\n visited = set(visited)\n while True:\n new = visited.copy()\n for page in visited:\n for link in cls.iter_pages():\n if link in visited:\n continue\n if page in link.links:\n link.parent = page\n new.add(link)\n if len(new) == len(visited):\n break\n visited = new\n\n @classmethod\n def iter_pages(cls, start_page: Page = None):\n pages = list(cls.all_pages.values())\n if start_page is not None and start_page in pages:\n # Move start_page to the front of the list\n pages.remove(start_page)\n pages.insert(0, start_page)\n cls.all_pages = {page.name: page for page in pages}\n return cls.all_pages.values()\n\n @classmethod\n def iter_check_buttons(cls):\n for page in cls.all_pages.values():\n yield page.check_button\n\n def __init__(self, check_button: Template, switch: Switch = None):\n self.check_button = check_button\n self.switch = switch\n self.links = {}\n (filename, line_number, function_name, text) = traceback.extract_stack()[-2]\n self.name = text[:text.find('=')].strip()\n self.parent = None\n Page.all_pages[self.name] = self\n\n def 
__eq__(self, other):\n return self.name == other.name\n\n def __hash__(self):\n return hash(self.name)\n\n def __str__(self):\n return self.name\n\n __repr__ = __str__\n\n def link(self, button: Template, destination: Page):\n self.links[destination] = button" }, { "identifier": "Switch", "path": "zafkiel/ui/switch.py", "snippet": "class Switch:\n \"\"\"\n A wrapper to handle switches in game, switch among states with retries.\n Main code comes from https://github.com/LmeSzinc/StarRailCopilot/blob/master/module/ui/switch.py\n\n Examples:\n # Definitions\n submarine_hunt = Switch('Submarine_hunt', offset=120)\n submarine_hunt.add_state('on', check_button=Template(r\"assets/ON.png\"))\n submarine_hunt.add_state('off', check_button=Template(r\"assets/OFF.png\"))\n\n # Change state to ON\n submarine_view.set(TPL_ON)\n \"\"\"\n\n def __init__(self, name: str = 'Switch', is_selector: bool = False):\n \"\"\"\n Args:\n name:\n is_selector: True if this is a multi choice, click to choose one of the switches.\n For example: | [Daily] | Urgent | -> click -> | Daily | [Urgent] |\n False if this is a switch, click the switch itself, and it changed in the same position.\n For example: | [ON] | -> click -> | [OFF] |\n \"\"\"\n self.name = name\n self.is_choice = is_selector\n self.state_list = []\n\n def __str__(self):\n return self.name\n\n __repr__ = __str__\n\n def add_state(self, state: str, check_button: Template, click_button: Template = None):\n \"\"\"\n Args:\n state: Must match check_button.name\n check_button:\n click_button:\n \"\"\"\n self.state_list.append({\n 'state': state,\n 'check_button': check_button,\n 'click_button': click_button if click_button is not None else check_button,\n })\n\n def get_data(self, state: Template) -> dict:\n \"\"\"\n Args:\n state:\n\n Returns:\n Dictionary in add_state\n\n Raises:\n ScriptError: If state invalid\n \"\"\"\n for row in self.state_list:\n if row['state'] == state.name:\n return row\n\n raise ScriptError(f'Switch {self.name} received an invalid state {state}')" }, { "identifier": "UI", "path": "zafkiel/ui/ui.py", "snippet": "class UI(API):\n \"\"\"\n Processing interface related functions.\n Main code comes from https://github.com/LmeSzinc/StarRailCopilot/blob/master/tasks/base/ui.py\n and https://github.com/LmeSzinc/StarRailCopilot/blob/master/module/ui/switch.py\n \"\"\"\n\n # Make ui_current mutable so that it can be shared among subclasses of the UI class.\n ui_current: dict = {'page': None}\n popup_list: list = []\n\n def ui_switch_appear(self, switch: Switch) -> bool:\n \"\"\"\n Args:\n switch:\n \"\"\"\n if self.ui_get_current_page().switch != switch:\n return False\n\n for data in switch.state_list:\n if self.exists(data['check_button']):\n return True\n return False\n\n def ui_get_current_state(self, switch: Switch) -> str:\n \"\"\"\n Args:\n switch:\n\n Returns:\n state name or 'unknown'.\n \"\"\"\n if self.ui_current['page'].switch != switch:\n logger.warning(f\"{self.ui_current['page']} does not have {switch}\")\n return 'unknown'\n\n for data in switch.state_list:\n if self.exists(data['check_button']):\n return data['state']\n return 'unknown'\n\n def ui_page_appear(self, page: Page, timeout: float = 0) -> bool or tuple:\n \"\"\"\n Args:\n page:\n timeout: Seconds to find.\n\n Returns:\n If found, return tuple of (x, y), else return False.\n \"\"\"\n return self.exists(page.check_button, timeout)\n\n def ui_get_current_page(self):\n \"\"\"\n Returns:\n Page:\n\n Raises:\n NotRunningError:\n PageUnknownError:\n \"\"\"\n\n @run_once\n 
def app_check():\n if not self.app_is_running():\n raise NotRunningError(\"Game not running\")\n\n timeout = Timer(10, count=20).start()\n while True:\n\n # End\n if timeout.reached():\n break\n\n # Known pages\n for page in Page.iter_pages():\n if page.check_button is None:\n continue\n if self.ui_page_appear(page=page):\n self.ui_current['page'] = page\n return page\n\n # Unknown page but able to handle\n if self.ui_additional():\n timeout.reset()\n continue\n\n app_check()\n\n # Unknown page, need manual switching\n raise PageUnknownError\n\n def _set_state(self, switch: Switch, state: Template) -> bool:\n counter = 0\n changed = False\n warning_show_timer = Timer(5, count=10).start()\n click_timer = Timer(1, count=3)\n while True:\n\n # Detect\n current = self.ui_get_current_state(switch)\n\n # End\n if current == state.name:\n logger.info(f'{switch.name} set to {state.name}')\n return changed\n\n # Warning\n if current == 'unknown':\n if self.ui_additional():\n continue\n if warning_show_timer.reached():\n logger.warning(f'Unknown {switch.name} switch')\n warning_show_timer.reset()\n if counter >= 1:\n logger.warning(\n f'{switch.name} switch {state.name} asset has evaluated to unknown too many times, '\n f'asset should be re-verified')\n return False\n counter += 1\n continue\n\n # Click\n if click_timer.reached():\n click_state = state if switch.is_choice else current\n button = switch.get_data(click_state)['click_button']\n self.touch(button)\n click_timer.reset()\n changed = True\n\n return changed\n\n def ui_goto(self, destination: Page, state: Template = None):\n \"\"\"\n Args:\n destination:\n state: Target state of switch, which must be in destination page.\n \"\"\"\n\n # check if state is valid\n if state is not None:\n if destination.switch is None:\n raise ScriptError(f'Page {destination} has no switch')\n destination.switch.get_data(state)\n\n logger.debug(f\"------ UI GOTO {str(destination).upper()}:{state.name.upper()} ------\")\n else:\n logger.debug(f\"------ UI GOTO {str(destination).upper()} ------\")\n\n # Create connection\n Page.init_connection(destination)\n\n while True:\n\n # Destination page\n if self.ui_page_appear(destination, timeout=0.5):\n self.ui_current['page'] = destination\n logger.debug(f'Page arrive: {destination}')\n if state is not None:\n self._set_state(destination.switch, state)\n break\n\n # Other pages\n clicked = False\n for page in Page.iter_pages(start_page=self.ui_current['page']):\n if page.parent is None or page.check_button is None:\n continue\n if self.exists(page.check_button):\n self.ui_current['page'] = page\n button = page.links[page.parent]\n self.touch(button)\n logger.info(f'Page switch: {page} -> {page.parent}')\n clicked = True\n break\n if clicked:\n continue\n\n # Additional\n if self.ui_additional():\n continue\n\n # Reset connection\n Page.clear_connection()\n\n def ui_ensure(self, destination: Page, state: Template = None) -> bool:\n \"\"\"\n Args:\n destination:\n state: Target state of switch, which must be in destination page.\n\n Returns:\n bool: If UI switched.\n \"\"\"\n self.ui_get_current_page()\n\n if self.ui_current['page'] == destination:\n if state is not None:\n if self.ui_get_current_state(destination.switch) == state.name:\n logger.debug(f\"Arrived at {destination}:{state.name}\")\n return False\n else:\n self._set_state(destination.switch, state)\n return True\n else:\n logger.debug(f\"Already at {destination}\")\n return False\n else:\n self.ui_goto(destination, state)\n return True\n\n def 
ui_ensure_index(\n self,\n index: int,\n letter: Ocr or callable,\n next_button: Template,\n prev_button: Template,\n fast: bool = True,\n interval: float = 0.2\n ):\n \"\"\"\n For pages with similar layout, ensure index of target page.\n\n Args:\n index: Index of target page.\n letter: OCR button.\n next_button:\n prev_button:\n fast: Default true. False when index is not continuous.\n interval: Seconds between two click.\n \"\"\"\n retry = Timer(1, count=2)\n while True:\n if isinstance(letter, Ocr):\n current = letter.ocr_single_line(self.screenshot())\n else:\n current = letter(self.screenshot())\n\n logger.info(f\"{self.ui_current['page']}: Index {current}\")\n diff = index - current\n if diff == 0:\n break\n if current == 0:\n logger.warning(f'ui_ensure_index got an empty current value: {current}')\n continue\n\n if retry.reached():\n button = next_button if diff > 0 else prev_button\n if fast:\n self.touch(button, times=abs(diff), interval=interval)\n else:\n self.touch(button)\n retry.reset()\n\n def get_popup_list(self, popups: list):\n \"\"\"\n Get list from program, must be called before self.ui_additional().\n\n Args:\n popups: list of handle popup functions\n \"\"\"\n for popup in popups:\n self.popup_list.append(popup)\n\n def ui_additional(self) -> bool:\n \"\"\"\n Handle all possible popups during UI switching.\n\n Returns:\n If handled any popup.\n \"\"\"\n for popup in self.popup_list:\n if popup():\n return True\n\n return False\n\n def to_json(self) -> dict:\n # May not be actual current page\n return {'ui_current': str(self.ui_current['page'])}" } ]
from zafkiel import API, Template, logger, Timer, simple_report, Config
from zafkiel.ocr import Keyword, Ocr, Digit, DigitCounter, Duration, OcrResultButton
from zafkiel.ui import Page, Switch, UI
12,426
# Auto import test Keyword Ocr Digit DigitCounter Duration OcrResultButton Page Switch
# Auto import test Keyword Ocr Digit DigitCounter Duration OcrResultButton Page Switch
UI
14
2023-11-12 09:33:35+00:00
16k
doodledood/chat-flock
chatflock/use_cases/bshr.py
[ { "identifier": "InMemoryChatDataBackingStore", "path": "chatflock/backing_stores/in_memory.py", "snippet": "class InMemoryChatDataBackingStore(ChatDataBackingStore):\n messages: List[ChatMessage]\n participants: Dict[str, ChatParticipant]\n last_message_id: Optional[int] = None\n\n def __init__(\n self, messages: Optional[List[ChatMessage]] = None, participants: Optional[List[ChatParticipant]] = None\n ):\n self.messages = messages or []\n self.participants = {participant.name: participant for participant in (participants or [])}\n self.last_message_id = None if len(self.messages) == 0 else self.messages[-1].id\n\n def get_messages(self) -> List[ChatMessage]:\n return self.messages\n\n def add_message(self, sender_name: str, content: str, timestamp: Optional[datetime.datetime] = None) -> ChatMessage:\n self.last_message_id = self.last_message_id + 1 if self.last_message_id is not None else 1\n\n message = ChatMessage(\n id=self.last_message_id,\n sender_name=sender_name,\n content=content,\n timestamp=timestamp or datetime.datetime.now(),\n )\n\n self.messages.append(message)\n\n return message\n\n def clear_messages(self):\n self.messages = []\n self.last_message_id = None\n\n def get_active_participants(self) -> List[ActiveChatParticipant]:\n participants = list(self.participants.values())\n active_participants = [\n participant for participant in participants if isinstance(participant, ActiveChatParticipant)\n ]\n\n return active_participants\n\n def get_non_active_participants(self) -> List[ChatParticipant]:\n participants = list(self.participants.values())\n participants = [\n participant for participant in participants if not isinstance(participant, ActiveChatParticipant)\n ]\n\n return participants\n\n def get_active_participant_by_name(self, name: str) -> Optional[ActiveChatParticipant]:\n if name not in self.participants:\n return None\n\n participant = self.participants[name]\n if not isinstance(participant, ActiveChatParticipant):\n return None\n\n return participant\n\n def get_non_active_participant_by_name(self, name: str) -> Optional[ChatParticipant]:\n if name not in self.participants:\n return None\n\n participant = self.participants[name]\n if isinstance(participant, ActiveChatParticipant):\n return None\n\n return participant\n\n def add_participant(self, participant: ChatParticipant) -> None:\n if participant.name in self.participants:\n raise ChatParticipantAlreadyJoinedToChatError(participant.name)\n\n self.participants[participant.name] = participant\n\n def remove_participant(self, participant: ChatParticipant) -> None:\n if participant.name not in self.participants:\n raise ChatParticipantNotJoinedToChatError(participant.name)\n\n self.participants.pop(participant.name)\n\n def has_active_participant_with_name(self, participant_name: str) -> bool:\n if participant_name in self.participants:\n participant = self.participants[participant_name]\n return isinstance(participant, ActiveChatParticipant)\n\n return False\n\n def has_non_active_participant_with_name(self, participant_name: str) -> bool:\n if participant_name in self.participants:\n participant = self.participants[participant_name]\n return not isinstance(participant, ActiveChatParticipant)\n\n return False" }, { "identifier": "LangChainMemoryBasedChatDataBackingStore", "path": "chatflock/backing_stores/langchain.py", "snippet": "class LangChainMemoryBasedChatDataBackingStore(InMemoryChatDataBackingStore):\n no_output_message: str = \"##NO_OUTPUT##\"\n\n def __init__(\n self,\n memory: BaseChatMemory,\n 
memory_key_getter: Optional[Callable[[BaseChatMemory], str]] = None,\n messages: Optional[List[ChatMessage]] = None,\n include_timestamp_in_messages: bool = False,\n participants: Optional[List[ChatParticipant]] = None,\n ):\n super().__init__(participants=participants)\n\n self.memory = memory\n self.include_timestamp_in_messages = include_timestamp_in_messages\n\n if memory_key_getter is None:\n\n def default_memory_key_getter(memory: BaseChatMemory) -> str:\n if hasattr(memory, \"memory_key\"):\n return str(memory.memory_key)\n\n return self.memory.output_key or \"history\"\n\n self.memory_key_getter: Callable[[BaseChatMemory], str] = default_memory_key_getter\n else:\n self.memory_key_getter = memory_key_getter\n\n def get_messages(self) -> List[ChatMessage]:\n prev_return_messages = self.memory.return_messages\n\n self.memory.return_messages = True\n\n memory_key = self.memory_key_getter(self.memory)\n base_messages = self.memory.load_memory_variables({})[memory_key]\n chat_messages = [\n base_message_to_chat_message(base_message)\n for base_message in base_messages\n if base_message.content != self.no_output_message\n ]\n\n self.memory.return_messages = prev_return_messages\n\n return chat_messages\n\n def add_message(self, sender_name: str, content: str, timestamp: Optional[datetime.datetime] = None) -> ChatMessage:\n message = super().add_message(sender_name=sender_name, content=content)\n\n prefix = \"\"\n if self.include_timestamp_in_messages:\n pretty_datetime = message.timestamp.strftime(\"%m-%d-%Y %H:%M:%S\")\n prefix = f\"[{pretty_datetime}] \"\n\n self.memory.save_context(\n {\"input\": f\"{prefix}{message.id}. {message.sender_name}: {message.content}\"},\n {\"output\": self.no_output_message},\n )\n\n return message\n\n def clear_messages(self):\n super().clear_messages()\n\n self.memory.clear()" }, { "identifier": "Chat", "path": "chatflock/base.py", "snippet": "class Chat:\n backing_store: ChatDataBackingStore\n renderer: ChatRenderer\n name: Optional[str] = None\n max_total_messages: Optional[int] = None\n hide_messages: bool = False\n\n def __init__(\n self,\n backing_store: ChatDataBackingStore,\n renderer: ChatRenderer,\n initial_participants: Optional[Sequence[ChatParticipant]] = None,\n name: Optional[str] = None,\n max_total_messages: Optional[int] = None,\n hide_messages: bool = False,\n ):\n if max_total_messages is not None and max_total_messages <= 0:\n raise ValueError(\"Max total messages must be None or greater than 0.\")\n\n self.backing_store = backing_store\n self.renderer = renderer\n self.name = name\n self.hide_messages = hide_messages\n self.max_total_messages = max_total_messages\n\n for i, participant in enumerate(initial_participants or []):\n self.add_participant(participant)\n\n def add_participant(self, participant: ChatParticipant) -> None:\n if self.has_active_participant_with_name(participant.name) or self.has_non_active_participant_with_name(\n participant.name\n ):\n raise ChatParticipantAlreadyJoinedToChatError(participant.name)\n\n self.backing_store.add_participant(participant)\n\n all_participants = (\n self.backing_store.get_active_participants() + self.backing_store.get_non_active_participants()\n )\n for participant in all_participants:\n participant.on_participant_joined_chat(chat=self, participant=participant)\n\n def remove_participant(self, participant: ChatParticipant) -> None:\n self.backing_store.remove_participant(participant)\n\n active_participants = self.backing_store.get_active_participants()\n non_active_participants = 
self.backing_store.get_non_active_participants()\n all_participants = active_participants + non_active_participants\n\n for participant in all_participants:\n participant.on_participant_left_chat(chat=self, participant=participant)\n\n def add_message(self, sender_name: str, content: str) -> None:\n sender = self.backing_store.get_active_participant_by_name(sender_name)\n if sender is None:\n raise ChatParticipantNotJoinedToChatError(sender_name)\n\n message = self.backing_store.add_message(sender_name=sender_name, content=content)\n\n self.renderer.render_new_chat_message(chat=self, message=message)\n\n active_participants = self.backing_store.get_active_participants()\n non_active_participants = self.backing_store.get_non_active_participants()\n all_participants = active_participants + non_active_participants\n\n for participant in all_participants:\n participant.on_new_chat_message(chat=self, message=message)\n\n def get_messages(self) -> List[ChatMessage]:\n return self.backing_store.get_messages()\n\n def clear_messages(self):\n self.backing_store.clear_messages()\n\n def get_active_participants(self) -> List[ActiveChatParticipant]:\n return self.backing_store.get_active_participants()\n\n def get_non_active_participants(self) -> List[ChatParticipant]:\n return self.backing_store.get_non_active_participants()\n\n def get_active_participant_by_name(self, name: str) -> Optional[ActiveChatParticipant]:\n return self.backing_store.get_active_participant_by_name(name=name)\n\n def get_non_active_participant_by_name(self, name: str) -> Optional[ChatParticipant]:\n return self.backing_store.get_non_active_participant_by_name(name=name)\n\n def has_active_participant_with_name(self, participant_name: str) -> bool:\n return self.backing_store.has_active_participant_with_name(participant_name=participant_name)\n\n def has_non_active_participant_with_name(self, participant_name: str) -> bool:\n return self.backing_store.has_non_active_participant_with_name(participant_name=participant_name)\n\n @property\n def active_participants_str(self):\n return \"\\n\\n\".join([participant.detailed_str() for participant in self.get_active_participants()])" }, { "identifier": "ChatDataBackingStore", "path": "chatflock/base.py", "snippet": "class ChatDataBackingStore(abc.ABC):\n @abc.abstractmethod\n def get_messages(self) -> List[ChatMessage]:\n raise NotImplementedError()\n\n @abc.abstractmethod\n def add_message(self, sender_name: str, content: str, timestamp: Optional[datetime] = None) -> ChatMessage:\n raise NotImplementedError()\n\n @abc.abstractmethod\n def clear_messages(self) -> None:\n raise NotImplementedError()\n\n @abc.abstractmethod\n def get_active_participants(self) -> List[ActiveChatParticipant]:\n raise NotImplementedError()\n\n @abc.abstractmethod\n def get_non_active_participants(self) -> List[ChatParticipant]:\n raise NotImplementedError()\n\n @abc.abstractmethod\n def get_active_participant_by_name(self, name: str) -> Optional[ActiveChatParticipant]:\n raise NotImplementedError()\n\n @abc.abstractmethod\n def get_non_active_participant_by_name(self, name: str) -> Optional[ChatParticipant]:\n raise NotImplementedError()\n\n @abc.abstractmethod\n def add_participant(self, participant: ChatParticipant) -> None:\n raise NotImplementedError()\n\n @abc.abstractmethod\n def remove_participant(self, participant: ChatParticipant) -> None:\n raise NotImplementedError()\n\n @abc.abstractmethod\n def has_active_participant_with_name(self, participant_name: str) -> bool:\n raise 
NotImplementedError()\n\n @abc.abstractmethod\n def has_non_active_participant_with_name(self, participant_name: str) -> bool:\n raise NotImplementedError()" }, { "identifier": "RoundRobinChatConductor", "path": "chatflock/conductors/round_robin.py", "snippet": "class RoundRobinChatConductor(ChatConductor):\n def select_next_speaker(self, chat: Chat) -> Optional[ActiveChatParticipant]:\n active_participants = chat.get_active_participants()\n if len(active_participants) <= 0:\n return None\n\n messages = chat.get_messages()\n last_message = messages[-1] if len(messages) > 0 else None\n\n if last_message is not None and self.is_termination_message(last_message):\n return None\n\n last_speaker = last_message.sender_name if last_message is not None else None\n if last_speaker is None:\n return next(iter(active_participants))\n\n # Rotate to the next participant in the list.\n participant_names = [participant.name for participant in active_participants]\n\n if last_speaker not in participant_names:\n next_speaker_name = participant_names[0]\n else:\n last_speaker_index = participant_names.index(last_speaker)\n next_speaker_index = (last_speaker_index + 1) % len(participant_names)\n next_speaker_name = participant_names[next_speaker_index]\n\n next_speaker = chat.get_active_participant_by_name(next_speaker_name)\n if next_speaker is None or not isinstance(next_speaker, ActiveChatParticipant):\n raise ChatParticipantNotJoinedToChatError(next_speaker_name)\n\n return next_speaker\n\n def get_chat_result(self, chat: \"Chat\") -> str:\n result = super().get_chat_result(chat=chat)\n\n try:\n idx = result.rindex(\"TERMINATE\")\n result = result[:idx].strip()\n except ValueError:\n result = result.strip()\n\n return result\n\n def is_termination_message(self, message: ChatMessage) -> bool:\n return message.content.strip().endswith(\"TERMINATE\")" }, { "identifier": "chat_messages_to_pydantic", "path": "chatflock/parsing_utils.py", "snippet": "def chat_messages_to_pydantic(\n chat_messages: Sequence[ChatMessage],\n chat_model: BaseChatModel,\n output_schema: Type[TOutputSchema],\n spinner: Optional[Halo] = None,\n n_tries: int = 3,\n hide_message: bool = True,\n) -> TOutputSchema:\n text_to_json_ai = LangChainBasedAIChatParticipant(\n chat_model=chat_model,\n name=\"Jason\",\n role=\"JSON Converter\",\n symbol=\"📄\",\n personal_mission=\"Your only purpose is to convert the previous chat messages (usually the last one)\"\n \"to a valid and logical JSON that follows the JSON SCHEMA provided. Your message should \"\n \"include only correct JSON. 
No fluff.\",\n other_prompt_sections=[Section(name=\"JSON SCHEMA\", text=str(pydantic_to_json_schema(output_schema)))],\n ignore_group_chat_environment=True,\n spinner=spinner,\n )\n json_parser = JSONOutputParserChatParticipant(output_schema=output_schema)\n\n # Remove TERMINATE if present so the chat conductor doesn't end the chat prematurely\n if len(chat_messages) > 0:\n chat_messages = list(chat_messages).copy()\n last_message = chat_messages[-1]\n\n try:\n # Chop the content at the last instance of the word TERMINATE in the content\n idx = last_message.content.rindex(\"TERMINATE\")\n new_content = last_message.content[:idx].strip()\n\n last_message = ChatMessage(id=last_message.id, sender_name=last_message.sender_name, content=new_content)\n\n chat_messages[-1] = last_message\n except ValueError:\n pass\n\n parser_chat = Chat(\n backing_store=InMemoryChatDataBackingStore(messages=list(chat_messages)),\n renderer=NoChatRenderer(),\n initial_participants=[text_to_json_ai, json_parser],\n hide_messages=hide_message,\n max_total_messages=len(chat_messages) + 1 + (n_tries - 1) * 2,\n )\n conductor = RoundRobinChatConductor()\n\n _ = conductor.initiate_dialog(chat=parser_chat)\n\n if json_parser.output is None:\n raise MessageCouldNotBeParsedError(\"An output could not be parsed from the chat messages.\")\n\n return json_parser.output" }, { "identifier": "LangChainBasedAIChatParticipant", "path": "chatflock/participants/langchain.py", "snippet": "class LangChainBasedAIChatParticipant(ActiveChatParticipant):\n class Config:\n arbitrary_types_allowed = True\n\n def __init__(\n self,\n name: str,\n chat_model: BaseChatModel,\n symbol: str = \"🤖\",\n role: str = \"AI Assistant\",\n personal_mission: str = \"Be a helpful AI assistant.\",\n other_prompt_sections: Optional[List[Section]] = None,\n retriever: Optional[BaseRetriever] = None,\n tools: Optional[List[BaseTool]] = None,\n chat_model_args: Optional[Dict[str, Any]] = None,\n spinner: Optional[Halo] = None,\n ignore_group_chat_environment: bool = False,\n include_timestamp_in_messages: bool = False,\n **kwargs: Any,\n ):\n super().__init__(name=name, symbol=symbol, **kwargs)\n\n self.role = role\n self.chat_model = chat_model\n self.chat_model_args = chat_model_args or {}\n self.other_prompt_sections = other_prompt_sections or []\n self.ignore_group_chat_environment = ignore_group_chat_environment\n self.include_timestamp_in_messages = include_timestamp_in_messages\n self.retriever = retriever\n self.tools = tools\n self.spinner = spinner\n self.personal_mission = personal_mission\n\n def create_system_message(self, chat: \"Chat\", relevant_docs: Sequence[Document]) -> str:\n now = datetime.now()\n pretty_datetime = now.strftime(\"%m-%d-%Y %H:%M:%S\")\n\n base_sections = [\n Section(name=\"Current Time\", text=pretty_datetime),\n Section(name=\"Name\", text=self.name),\n Section(name=\"Role\", text=self.role),\n Section(name=\"Personal Mission\", text=self.personal_mission),\n Section(\n name=\"Additional Context for Response\",\n text=\"None\"\n if len(relevant_docs) == 0\n else \"The following documents may be relevant for your response, only use \"\n \"them for context for a better response, if applicable\",\n sub_sections=[\n Section(name=f\"Document {i + 1}\", text=f\"```{doc.page_content}```\")\n for i, doc in enumerate(relevant_docs)\n ],\n ),\n Section(\n name=\"Response Message Format\",\n list=[\n \"Your response should be the message you want to send to the group chat as your own name, \"\n \"role, and personal mission.\",\n 
\"Must not include any prefix (e.g., timestamp, sender name, etc.).\",\n \"Response must be a message as will be shown in the chat (timestamp and sender name are \"\n \"system-generated for you).\",\n ],\n sub_sections=[\n Section(name=\"Well-Formatted Chat Response Examples\", list=['\"Hello, how are you?\"']),\n Section(\n name=\"Badly-Formatted Chat Response Examples\",\n list=[\n (\n '\"[TIMESTAMP] John: Hello, how are you?\"'\n if self.include_timestamp_in_messages\n else '\"John: Hello, how are you?\"'\n ),\n ],\n ),\n ],\n ),\n ]\n\n active_participants = chat.get_active_participants()\n if self.ignore_group_chat_environment:\n system_message = StructuredString(sections=[*base_sections, *self.other_prompt_sections])\n else:\n system_message = StructuredString(\n sections=[\n *base_sections,\n Section(\n name=\"Chat\",\n sub_sections=[\n Section(name=\"Name\", text=chat.name or \"No name provided. Just a general chat.\"),\n Section(\n name=\"Participants\",\n text=\"\\n\".join(\n [\n f'- {str(p)}{\" -> This is you.\" if p.name == self.name else \"\"}'\n for p in active_participants\n ]\n ),\n ),\n Section(\n name=\"Guidelines\",\n list=[\n \"Your personal mission is the most important thing to you. You should always \"\n \"prioritize it.\",\n \"If a chat goal is provided, you should still follow your personal mission but \"\n \"in a way that helps the group achieve the chat goal.\",\n \"If you are the only participant in the chat, you should act as if the chat is now \"\n \"a scratch pad for you to write down your thoughts, ideas, and work on your \"\n \"mission by yourself. \"\n \"In the messages do not refer to another entity, but rather to yourself \"\n \"(I instead of You); the messages should read and sound like \"\n \"your internal thoughts and should be succinct, unless they are concrete work \"\n \"(for example, implementing something, calculating things, etc.). \"\n \"You have all the time in the world to build your thoughts, ideas, and do the \"\n \"work needed. The chat is now your place to think and iterate on your mission and \"\n \" achieve it.\",\n ],\n ),\n Section(\n name=\"Rules\",\n list=[\n \"You do not have to respond directly to the one who sent you a message. You can respond \"\n \"to anyone in the group chat.\",\n \"You cannot have private conversations with other participants. Everyone can see all \"\n \"messages sent by all other participants.\",\n ],\n ),\n Section(\n name=\"Previous Chat Messages\",\n list=[\n \"Messages are prefixed by a timestamp and the sender's name (could also be everyone). \",\n \"The prefix is for context only; it's not actually part of the message they sent. \",\n (\n 'Example: \"[TIMESTAMP] John: Hello, how are you?\"'\n if self.include_timestamp_in_messages\n else 'Example: \"John: Hello, how are you?\"'\n ),\n \"Some messages could have been sent by participants who are no longer a part of this \"\n \"conversation. 
Use their contents for context only; do not talk to them.\",\n \"In your response only include the message without the prefix.\",\n \"If you are the only participant in the chat, the previous chat messages are your \"\n \" memories or internal thoughts instead.\",\n ],\n ),\n ],\n ),\n *self.other_prompt_sections,\n ]\n )\n\n return str(system_message)\n\n def chat_messages_to_chat_model_messages(\n self, chat_messages: Sequence[ChatMessage], active_participants: Sequence[ActiveChatParticipant]\n ) -> List[BaseMessage]:\n messages: List[BaseMessage] = []\n for i, message in enumerate(chat_messages):\n if self.include_timestamp_in_messages:\n pretty_datetime = message.timestamp.strftime(\"%m-%d-%Y %H:%M:%S\")\n content = f\"[{pretty_datetime}] \"\n else:\n content = \"\"\n\n if self.ignore_group_chat_environment:\n content += f\"{message.sender_name}: {message.content}\"\n else:\n content += message.content\n\n if message.sender_name == self.name:\n if len(active_participants) > 1 or i == len(active_participants) - 1:\n messages.append(AIMessage(content=content))\n else:\n messages.append(HumanMessage(content=content))\n else:\n messages.append(HumanMessage(content=content))\n\n if len(messages) == 0:\n messages.append(HumanMessage(content=f\"SYSTEM: The chat has started.\"))\n\n return messages\n\n def respond_to_chat(self, chat: Chat) -> str:\n if self.spinner is not None:\n self.spinner.start(text=f\"{str(self)} is thinking...\")\n\n chat_messages = chat.get_messages()\n\n if self.retriever is not None and len(chat_messages) > 0:\n relevant_docs = self.get_relevant_docs(messages=chat_messages)\n else:\n relevant_docs = []\n\n system_message = self.create_system_message(chat=chat, relevant_docs=relevant_docs)\n\n active_participants = chat.get_active_participants()\n all_messages = self.chat_messages_to_chat_model_messages(chat_messages, active_participants)\n all_messages = [SystemMessage(content=system_message), *all_messages]\n\n message_content = self.execute_messages(messages=all_messages)\n\n if self.spinner is not None:\n self.spinner.stop()\n\n potential_prefix = f\"{self.name}:\"\n if message_content.startswith(potential_prefix):\n message_content = message_content[len(potential_prefix) :].strip()\n\n return message_content\n\n def get_relevant_docs(self, messages: Sequence[ChatMessage]) -> List[Document]:\n if self.retriever is None:\n return []\n\n return self.retriever.get_relevant_documents(query=messages[-1].content)\n\n def execute_messages(self, messages: Sequence[BaseMessage]) -> str:\n return execute_chat_model_messages(\n messages=messages,\n chat_model=self.chat_model,\n tools=self.tools,\n spinner=self.spinner,\n chat_model_args=self.chat_model_args,\n )\n\n def __str__(self) -> str:\n return f\"{self.symbol} {self.name} ({self.role})\"\n\n def detailed_str(self, level: int = 0) -> str:\n prefix = \" \" * level\n\n tool_names = \", \".join([tool.name for tool in self.tools or []])\n if tool_names == \"\":\n tool_names = \"None\"\n\n return (\n f\"{prefix}- Name: {self.name}\\n{prefix} Role: {self.role}\\n{prefix} Symbol: {self.symbol}\\n\"\n f'{prefix} Personal Mission: \"{self.personal_mission}\"\\n{prefix} Tools: {tool_names}'\n )" }, { "identifier": "UserChatParticipant", "path": "chatflock/participants/user.py", "snippet": "class UserChatParticipant(ActiveChatParticipant):\n def __init__(self, name: str = \"User\", role: str = \"User\", symbol: str = \"👤\", **kwargs: Any):\n super().__init__(name, messages_hidden=True, **kwargs)\n\n self.role = role\n self.symbol 
= symbol\n\n def respond_to_chat(self, chat: Chat) -> str:\n return input(f\"{self.symbol} ({self.name}): \")\n\n def __str__(self) -> str:\n return f\"{self.symbol} {self.name} ({self.role})\"\n\n def detailed_str(self, level: int = 0) -> str:\n prefix = \" \" * level\n return f\"{prefix}- Name: {self.name}\\n{prefix} Role: {self.role}\\n{prefix} Symbol: {self.symbol}\"" }, { "identifier": "TerminalChatRenderer", "path": "chatflock/renderers/terminal.py", "snippet": "class TerminalChatRenderer(ChatRenderer):\n def __init__(self, print_timestamps: bool = False):\n self.print_timestamps = print_timestamps\n\n def render_new_chat_message(self, chat: Chat, message: ChatMessage) -> None:\n if chat.hide_messages:\n return\n\n pretty_timestamp_with_date = message.timestamp.strftime(\"%m-%d-%Y %H:%M:%S\")\n\n sender = chat.get_active_participant_by_name(message.sender_name)\n if sender is None:\n symbol = \"❓\"\n\n if self.print_timestamps:\n print(f\"[{pretty_timestamp_with_date}] {symbol} {message.sender_name}: {message.content}\")\n else:\n print(f\"{symbol} {message.sender_name}: {message.content}\")\n else:\n if sender.messages_hidden:\n return\n\n if chat.name is None:\n if self.print_timestamps:\n print(f\"[{pretty_timestamp_with_date}] {str(sender)}: {message.content}\")\n else:\n print(f\"{str(sender)}: {message.content}\")\n else:\n if self.print_timestamps:\n print(f\"[{pretty_timestamp_with_date}] {chat.name} > {str(sender)}: {message.content}\")\n else:\n print(f\"{chat.name} > {str(sender)}: {message.content}\")" }, { "identifier": "SequentialProcess", "path": "chatflock/sequencial_process/sequential_process.py", "snippet": "class SequentialProcess(Generic[T]):\n def __init__(self, steps: Sequence[Step[T]], initial_state: T, save_state: Callable[[T], None]):\n self.steps = steps\n self.state = initial_state\n self.save_state_func = save_state\n\n def save_state(self, state: T) -> None:\n self.save_state_func(state)\n\n def run(self) -> T:\n for step in self.steps:\n if step.on_step_start:\n step.on_step_start(self.state)\n\n try:\n for new_state in step.run(self.state) or []:\n self.state = new_state\n self.save_state(state=self.state)\n\n self.save_state(state=self.state)\n\n if step.on_step_completed:\n step.on_step_completed(self.state)\n except Exception as e:\n if step.on_step_failed:\n step.on_step_failed(self.state)\n\n raise e\n\n return self.state" }, { "identifier": "Step", "path": "chatflock/sequencial_process/sequential_process.py", "snippet": "class Step(Generic[T]):\n def __init__(\n self,\n name: str,\n func: Callable[[T, Any], Optional[Generator[T, None, None]]],\n on_step_start: Optional[Callable[[T], None]] = None,\n on_step_completed: Optional[Callable[[T], None]] = None,\n on_step_failed: Optional[Callable[[T], None]] = None,\n ):\n self.name = name\n self.func = func\n self.on_step_start = on_step_start\n self.on_step_completed = on_step_completed\n self.on_step_failed = on_step_failed\n\n def run(self, state: T, **kwargs: Any) -> Generator[T, None, None]:\n res = self.func(state, **kwargs) # type: ignore\n if res is None:\n return\n\n for new_state in res:\n yield new_state" }, { "identifier": "Section", "path": "chatflock/structured_string.py", "snippet": "class Section:\n name: str\n text: Optional[str] = None\n list: Optional[List[str]] = None\n sub_sections: Optional[List[\"Section\"]] = None\n list_item_prefix: Optional[str] = \"-\"\n uppercase_name: bool = True\n\n def to_text(self, level: int = 0) -> str:\n result = f'{\"#\" * (level + 1)} 
{self.name.upper() if self.uppercase_name else self.name}'\n\n if self.text is not None:\n result += \"\\n\" + self.text\n\n if self.list is not None:\n result += \"\\n\" + \"\\n\".join(\n [\n f'{self.list_item_prefix if self.list_item_prefix else str(i + 1) + \".\"} {item}'\n for i, item in enumerate(self.list)\n ]\n )\n\n if self.sub_sections is not None:\n for sub_section in self.sub_sections:\n result += \"\\n\\n\" + sub_section.to_text(level + 1)\n\n return result" }, { "identifier": "StructuredString", "path": "chatflock/structured_string.py", "snippet": "class StructuredString:\n sections: List[Section]\n\n def __getitem__(self, item: str) -> Section:\n if not isinstance(item, str):\n raise TypeError(f\"Item must be of type str, not {type(item)}.\")\n\n relevant_sections = [section for section in self.sections if section.name == item]\n if len(relevant_sections) == 0:\n raise KeyError(f\"No section with name {item} exists.\")\n\n return relevant_sections[0]\n\n def __setitem__(self, key: str, value: Section) -> None:\n if not isinstance(key, str):\n raise TypeError(f\"Key must be of type str, not {type(key)}.\")\n\n if not isinstance(value, Section):\n raise TypeError(f\"Value must be of type Section, not {type(value)}.\")\n\n try:\n section = self[key]\n\n # Remove old section and replace with new one, in the same place\n self.sections.insert(self.sections.index(section), value)\n self.sections.remove(section)\n except KeyError:\n self.sections.append(value)\n\n def __str__(self) -> str:\n result = \"\"\n for section in self.sections:\n result += section.to_text() + \"\\n\\n\"\n\n return result\n\n def __repr__(self) -> str:\n return self.__str__()" }, { "identifier": "get_response", "path": "chatflock/use_cases/request_response.py", "snippet": "def get_response(\n query: str,\n answerer: ActiveChatParticipant,\n backing_store: Optional[ChatDataBackingStore] = None,\n renderer: Optional[ChatRenderer] = None,\n) -> Tuple[str, Chat]:\n user = UserChatParticipant(name=\"User\")\n participants = [user, answerer]\n\n chat = Chat(\n backing_store=backing_store or InMemoryChatDataBackingStore(),\n renderer=renderer or NoChatRenderer(),\n initial_participants=participants,\n max_total_messages=2,\n )\n\n chat_conductor = RoundRobinChatConductor()\n answer = chat_conductor.initiate_dialog(chat=chat, initial_message=query, from_participant=user)\n\n return answer, chat" }, { "identifier": "WebSearch", "path": "chatflock/web_research/web_research.py", "snippet": "class WebSearch:\n def __init__(\n self,\n chat_model: BaseChatModel,\n search_results_provider: SearchResultsProvider,\n page_query_analyzer: PageQueryAnalyzer,\n skip_results_if_answer_snippet_found: bool = True,\n ):\n self.chat_model = chat_model\n self.search_results_provider = search_results_provider\n self.page_query_analyzer = page_query_analyzer\n self.skip_results_if_answer_snippet_found = skip_results_if_answer_snippet_found\n\n def get_answer(\n self, query: str, n_results: int = 3, urls: Optional[List[str]] = None, spinner: Optional[Halo] = None\n ) -> Tuple[bool, str]:\n original_spinner_text = None if spinner is None else spinner.text\n qna = []\n\n if urls is None:\n if spinner is not None:\n spinner.start(f'Getting search results for \"{query}\"...')\n\n try:\n search_results = self.search_results_provider.search(query=query, n_results=n_results)\n except (TransientHTTPError, NonTransientHTTPError) as e:\n return False, f'Failed to get search results for \"{query}\" because of an error: {e}'\n\n if spinner is not 
None:\n spinner.succeed(f'Got search results for \"{query}\".')\n\n if len(search_results.organic_results) == 0 and search_results.answer_snippet is None:\n return False, \"Nothing was found on the web for this query.\"\n\n if search_results.knowledge_graph_description is not None:\n qna.append({\"answer\": search_results.knowledge_graph_description, \"source\": \"Knowledge Graph\"})\n\n if search_results.answer_snippet is not None:\n qna.append({\"answer\": search_results.answer_snippet, \"source\": \"Answer Snippet\"})\n\n if not self.skip_results_if_answer_snippet_found or search_results.answer_snippet is None:\n for result in search_results.organic_results:\n if url_unsupported(result.link):\n continue\n\n if spinner is not None:\n spinner.start(f'Reading & analyzing #{result.position} result \"{result.title}\"')\n\n try:\n page_result = self.page_query_analyzer.analyze(\n url=result.link, title=result.title, query=query, spinner=spinner\n )\n answer = page_result.answer\n\n if spinner is not None:\n spinner.succeed(f'Read & analyzed #{result.position} result \"{result.title}\".')\n except Exception as e:\n if type(e) in (RetryError, TransientHTTPError, NonTransientHTTPError):\n if spinner is not None:\n spinner.warn(\n f'Failed to read & analyze #{result.position} result \"{result.title}\", moving on.'\n )\n\n answer = \"Unable to answer query because the page could not be read.\"\n else:\n raise\n\n qna.append({\"answer\": answer, \"source\": result.link})\n else:\n # Urls were provided, search in those urls instead of searching using a search engine\n for url in urls:\n if url_unsupported(url):\n continue\n\n if spinner is not None:\n spinner.start(f'Reading & analyzing URL \"{url}\"')\n\n try:\n page_result = self.page_query_analyzer.analyze(\n url=url, title=\"Unknown\", query=query, spinner=spinner\n )\n answer = page_result.answer\n\n if spinner is not None:\n spinner.succeed(f'Read & analyzed URL \"{url}\".')\n except Exception as e:\n if type(e) in (RetryError, TransientHTTPError, NonTransientHTTPError):\n if spinner is not None:\n spinner.warn(f'Failed to read & analyze URL \"{url}\", moving on.')\n\n answer = \"Unable to answer query because the page could not be read.\"\n else:\n raise\n\n qna.append({\"answer\": answer, \"source\": url})\n\n if spinner is not None:\n spinner.start(f\"Processing results...\")\n\n formatted_answers = \"\\n\".join([f'{i + 1}. 
{q[\"answer\"]}; Source: {q[\"source\"]}' for i, q in enumerate(qna)])\n\n chat = Chat(\n backing_store=InMemoryChatDataBackingStore(),\n renderer=NoChatRenderer(),\n initial_participants=[\n UserChatParticipant(),\n LangChainBasedAIChatParticipant(\n name=\"Query Answer Aggregator\",\n role=\"Query Answer Aggregator\",\n personal_mission=\"Analyze query answers, discard unlikely ones, and provide an aggregated final response.\",\n chat_model=self.chat_model,\n other_prompt_sections=[\n Section(\n name=\"Aggregating Query Answers\",\n sub_sections=[\n Section(\n name=\"Process\",\n list=[\n \"Receive query and answers with sources.\",\n \"Analyze answers, discard unlikely or minority ones.\",\n \"Formulate final answer based on most likely answers.\",\n 'If no data found, respond \"The answer could not be found.\"',\n ],\n list_item_prefix=None,\n ),\n Section(\n name=\"Aggregation\",\n list=[\n \"Base final answer on sources.\",\n \"Incorporate sources as inline citations in Markdown format.\",\n 'Example: \"Person 1 was [elected president in 2012](https://...).\"',\n \"Only include sources from provided answers.\",\n \"If part of an answer is used, use the same links inline.\",\n ],\n ),\n Section(\n name=\"Final Answer Notes\",\n list=[\n \"Do not fabricate information. Stick to provided data.\",\n \"You will be given the top search results from a search engine, there is a reason they are the top results. You should pay attention to all of them and think about the query intent.\"\n \"If the answer is not found in the page data, state it clearly.\",\n \"Should be formatted in Markdown with inline citations.\",\n ],\n ),\n ],\n )\n ],\n ),\n ],\n max_total_messages=2,\n )\n chat_conductor = RoundRobinChatConductor()\n final_answer = chat_conductor.initiate_dialog(\n chat=chat,\n initial_message=str(\n StructuredString(\n sections=[Section(name=\"Query\", text=query), Section(name=\"Answers\", text=formatted_answers)]\n )\n ),\n )\n\n if spinner is not None:\n spinner.succeed(f\"Done searching the web.\")\n\n if original_spinner_text is not None:\n spinner.start(original_spinner_text)\n\n return True, final_answer" }, { "identifier": "WebResearchTool", "path": "chatflock/web_research/web_research.py", "snippet": "class WebResearchTool(BaseTool):\n web_search: WebSearch\n n_results: int = 3\n spinner: Optional[Halo] = None\n name: str = \"web_search\"\n description: str = \"Research the web. Use that to get an answer for a query you don't know or unsure of the answer to, for recent events, or if the user asks you to. This will evaluate answer snippets, knowledge graphs, and the top N results from google and aggregate a result.\"\n args_schema: Type[BaseModel] = WebSearchToolArgs\n progress_text: str = \"Searching the web...\"\n\n def _run(\n self,\n query: str,\n urls: Optional[List[str]] = None,\n run_manager: Optional[CallbackManagerForToolRun] = None,\n **kwargs: Any,\n ) -> Any:\n return self.web_search.get_answer(query=query, n_results=self.n_results, urls=urls, spinner=self.spinner)[1]" } ]
from typing import Any, Dict, Generator, Generic, List, Optional, Type, TypeVar
from functools import partial

from halo import Halo
from langchain.callbacks.manager import CallbackManagerForToolRun
from langchain.chat_models.base import BaseChatModel
from langchain.llms.openai import OpenAI
from langchain.memory import ConversationSummaryBufferMemory
from langchain.tools import BaseTool
from pydantic import BaseModel, Field

from chatflock.backing_stores import InMemoryChatDataBackingStore
from chatflock.backing_stores.langchain import LangChainMemoryBasedChatDataBackingStore
from chatflock.base import Chat, ChatDataBackingStore
from chatflock.conductors import RoundRobinChatConductor
from chatflock.parsing_utils import chat_messages_to_pydantic
from chatflock.participants.langchain import LangChainBasedAIChatParticipant
from chatflock.participants.user import UserChatParticipant
from chatflock.renderers import TerminalChatRenderer
from chatflock.sequencial_process import SequentialProcess, Step
from chatflock.structured_string import Section, StructuredString
from chatflock.use_cases.request_response import get_response
from chatflock.web_research import WebSearch
from chatflock.web_research.web_research import WebResearchTool

import datetime
import json
import questionary
11,236
    feedback: str = Field(
        description="If not satisficed yet, feedback on why not satisfied and what to think about next. If satisficed, feedback can be empty."
    )
    is_satisficed: bool = Field(description="Whether or not the information need has been satisficed.")


def generate_queries(
    state: BHSRState,
    chat_model: BaseChatModel,
    interactive_user: bool = True,
    max_queries: int = 5,
    shared_sections: Optional[List[Section]] = None,
    web_search_tool: Optional[BaseTool] = None,
    spinner: Optional[Halo] = None,
) -> None:
    if state.queries_to_run is not None and len(state.queries_to_run) > 0:
        # Means we are continuing a previous session
        return

    if shared_sections is None:
        shared_sections = []

    query_generator = LangChainBasedAIChatParticipant(
        name="Search Query Generator",
        role="Search Query Generator",
        personal_mission="You will be given a specific query or problem by the user and you are to generate a list of "
        f"AT MOST {max_queries} search queries that will be used to search the internet. Make sure you "
        f"generate comprehensive, counterfactual, and maximally orthogonal search queries. "
        "Employ everything you know about "
        "information foraging and information literacy to generate the best possible questions. "
        "Use a step-by-step approach and think about the information need and the information "
        "domain before generating the queries. Order the queries by their importance and relevance "
        "to the main information need of the user.",
        other_prompt_sections=shared_sections
        + [
            Section(
                name="Unclear Information Need",
                text=(
                    "If the information need or query are vague and unclear, either perform a web search to "
                    "clarify the information need or ask the user for clarification."
                    if interactive_user
                    else "If the information need or query are vague and unclear, either perform a web search to "
                    "clarify the information need or make a best guess. The user will not be available to "
                    "respond back."
                ),
            ),
            Section(
                name="Refine Queries",
                text='You might be given a first-pass information need with "None" previous queries and answers, '
                "in which case you will do the best you"
                'can to generate "naive queries" (uninformed search queries). However the USER might also '
                "give you previous search queries or other background information such as accumulated notes. "
                'If these materials are present, you are to generate "informed queries" - more specific '
                "search queries that aim to zero in on the correct information domain. Do not duplicate "
                "previously asked questions. Use the notes and other information presented to create "
                "targeted queries and/or to cast a wider net.",
            ),
            Section(
                name="Termination",
                text="Once you generate a new set of queries to run, you should terminate the chat immediately by "
                "ending your message with TERMINATE",
            ),
        ],
        tools=[web_search_tool] if web_search_tool is not None else None,
        ignore_group_chat_environment=True,
        chat_model=chat_model,
        spinner=spinner,
    )

    user = UserChatParticipant()
    participants = [user, query_generator]

    try:
        memory = ConversationSummaryBufferMemory(
            llm=chat_model, max_token_limit=OpenAI.modelname_to_contextsize(chat_model.model_name)  # type: ignore
        )
        backing_store: ChatDataBackingStore = LangChainMemoryBasedChatDataBackingStore(memory=memory)
    except ValueError:
        backing_store = InMemoryChatDataBackingStore()

    chat = Chat(
        backing_store=backing_store,
        renderer=TerminalChatRenderer(),
        initial_participants=participants,
        max_total_messages=None if interactive_user else 2,
    )
    chat_conductor = RoundRobinChatConductor()

    if state.information_need is None:
        if spinner is not None:
            spinner.stop()

        _ = chat_conductor.initiate_dialog(
            chat=chat, initial_message=f"What is your information need or query?", from_participant=query_generator
        )
    else:
        _ = chat_conductor.initiate_dialog(
            chat=chat,
            initial_message=str(
                StructuredString(
                    sections=[
                        Section(name="Information Need", text=state.information_need),
                        Section(
                            name="Previous Queries & Answers",
                            text="None"
                            if state.answers_to_queries is None or len(state.answers_to_queries) == 0
                            else None,
                            sub_sections=[
                                Section(name=query, text=f"```markdown\n{answer}\n```", uppercase_name=False)
                                for query, answer in (state.answers_to_queries or {}).items()
                            ],
                        ),
                        Section(name="Current Hypothesis", text=str(state.current_hypothesis)),
                    ]
                )
            ),
            from_participant=user,
        )
# Based directly on David Shaprio's BSHR Loop: https://github.com/daveshap/BSHR_Loop class BHSRState(BaseModel): information_need: Optional[str] = None queries_to_run: Optional[List[str]] = None answers_to_queries: Optional[Dict[str, str]] = None current_hypothesis: Optional[str] = None proposed_hypothesis: Optional[str] = None feedback: Optional[str] = None is_satisficed: Optional[bool] = None def save_state(state: BHSRState, state_file: Optional[str]) -> None: if state_file is None: return data = state.model_dump() with open(state_file, "w") as f: json.dump(data, f, indent=2) def load_state(state_file: Optional[str]) -> Optional[BHSRState]: if state_file is None: return None try: with open(state_file) as f: data = json.load(f) return BHSRState.model_validate(data) except FileNotFoundError: return None class QueryGenerationResult(BaseModel): information_need: str = Field(description="Information need as requested by the user.") queries: List[str] = Field(description="Set of queries to run.") class HypothesisGenerationResult(BaseModel): hypothesis: str = Field( description="A new or updated hypothesis based on the materials provided. Rich formatting using Markdown. Should include all relevant citations inline." ) class SatisficationCheckResult(BaseModel): feedback: str = Field( description="If not satisficed yet, feedback on why not satisfied and what to think about next. If satisficed, feedback can be empty." ) is_satisficed: bool = Field(description="Whether or not the information need has been satisficed.") def generate_queries( state: BHSRState, chat_model: BaseChatModel, interactive_user: bool = True, max_queries: int = 5, shared_sections: Optional[List[Section]] = None, web_search_tool: Optional[BaseTool] = None, spinner: Optional[Halo] = None, ) -> None: if state.queries_to_run is not None and len(state.queries_to_run) > 0: # Means we are continuing a previous session return if shared_sections is None: shared_sections = [] query_generator = LangChainBasedAIChatParticipant( name="Search Query Generator", role="Search Query Generator", personal_mission="You will be given a specific query or problem by the user and you are to generate a list of " f"AT MOST {max_queries} search queries that will be used to search the internet. Make sure you " f"generate comprehensive, counterfactual, and maximally orthogonal search queries. " "Employ everything you know about " "information foraging and information literacy to generate the best possible questions. " "Use a step-by-step approach and think about the information need and the information " "domain before generating the queries. Order the queries by their importance and relevance " "to the main information need of the user.", other_prompt_sections=shared_sections + [ Section( name="Unclear Information Need", text=( "If the information need or query are vague and unclear, either perform a web search to " "clarify the information need or ask the user for clarification." if interactive_user else "If the information need or query are vague and unclear, either perform a web search to " "clarify the information need or make a best guess. The user will not be available to " "respond back." ), ), Section( name="Refine Queries", text='You might be given a first-pass information need with "None" previous queries and answers, ' "in which case you will do the best you" 'can to generate "naive queries" (uninformed search queries). However the USER might also ' "give you previous search queries or other background information such as accumulated notes. 
" 'If these materials are present, you are to generate "informed queries" - more specific ' "search queries that aim to zero in on the correct information domain. Do not duplicate " "previously asked questions. Use the notes and other information presented to create " "targeted queries and/or to cast a wider net.", ), Section( name="Termination", text="Once you generate a new set of queries to run, you should terminate the chat immediately by " "ending your message with TERMINATE", ), ], tools=[web_search_tool] if web_search_tool is not None else None, ignore_group_chat_environment=True, chat_model=chat_model, spinner=spinner, ) user = UserChatParticipant() participants = [user, query_generator] try: memory = ConversationSummaryBufferMemory( llm=chat_model, max_token_limit=OpenAI.modelname_to_contextsize(chat_model.model_name) # type: ignore ) backing_store: ChatDataBackingStore = LangChainMemoryBasedChatDataBackingStore(memory=memory) except ValueError: backing_store = InMemoryChatDataBackingStore() chat = Chat( backing_store=backing_store, renderer=TerminalChatRenderer(), initial_participants=participants, max_total_messages=None if interactive_user else 2, ) chat_conductor = RoundRobinChatConductor() if state.information_need is None: if spinner is not None: spinner.stop() _ = chat_conductor.initiate_dialog( chat=chat, initial_message=f"What is your information need or query?", from_participant=query_generator ) else: _ = chat_conductor.initiate_dialog( chat=chat, initial_message=str( StructuredString( sections=[ Section(name="Information Need", text=state.information_need), Section( name="Previous Queries & Answers", text="None" if state.answers_to_queries is None or len(state.answers_to_queries) == 0 else None, sub_sections=[ Section(name=query, text=f"```markdown\n{answer}\n```", uppercase_name=False) for query, answer in (state.answers_to_queries or {}).items() ], ), Section(name="Current Hypothesis", text=str(state.current_hypothesis)), ] ) ), from_participant=user, )
output = chat_messages_to_pydantic(
5
2023-11-12 11:10:58+00:00
16k
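The row above quotes the full definitions of `BHSRState`, `save_state`, and `load_state` in its code fields. As a quick illustration of how that state persistence works, here is a minimal round-trip sketch; it assumes those three definitions (pydantic v2) are already in scope, and the file name is purely illustrative.

```python
# Minimal round-trip sketch for the BHSRState persistence helpers quoted above.
# Assumes BHSRState, save_state and load_state (pydantic v2) are defined/imported.
state = BHSRState(
    information_need="What are the main drivers of coral bleaching?",
    queries_to_run=["coral bleaching causes", "ocean warming coral reefs"],
    is_satisficed=False,
)

save_state(state, "bshr_state.json")      # writes state.model_dump() as indented JSON

restored = load_state("bshr_state.json")  # returns None if the file does not exist
assert restored is not None
assert restored.queries_to_run == state.queries_to_run
```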
atlantic-quantum/Shipyard
shipyard/passes/interpreter.py
[ { "identifier": "ActivationRecord", "path": "shipyard/call_stack.py", "snippet": "class ActivationRecord:\n \"\"\"Activation Records for shipyard\"\"\"\n\n def __init__(\n self,\n name: str,\n ar_type: ARType,\n nesting_level: int,\n ):\n self.name = name\n self.type = ar_type\n self.nesting_level = nesting_level\n self.members = {}\n\n def __setitem__(self, key, value):\n self.members[key] = value\n LOGGER.debug(\"%s: %s\", key, value)\n\n def __getitem__(self, key):\n return self.members[key]\n\n def get(self, key, default=None):\n \"\"\"Gets a member of the activation record by key\"\"\"\n return self.members.get(key, default)\n\n def __str__(self):\n lines = [f\"{self.nesting_level}: {self.type.value} {self.name}\"]\n for name, val in self.members.items():\n lines.append(f\" {name:<20}: {val}\")\n\n return \"\\n\".join(lines)\n\n def __repr__(self):\n return self.__str__()" }, { "identifier": "ARType", "path": "shipyard/call_stack.py", "snippet": "class ARType(Enum):\n \"\"\"\n Enumeration of Acivation Record Types\n \"\"\"\n\n PROGRAM = \"PROGRAM\"\n EXTERN = \"EXTERN\"\n SUBROUTINE = \"SUBROUTINE\"\n CALIBRATION = \"CALIBRATION\"\n DEFCAL = \"DEFCAL\"\n GATE = \"GATE\"\n LOOP = \"LOOP\"" }, { "identifier": "CallStack", "path": "shipyard/call_stack.py", "snippet": "class CallStack:\n \"\"\"\n CallStack for the shipyard\n \"\"\"\n\n def __init__(self):\n self._records = []\n\n def push(self, activation_record: ActivationRecord):\n \"\"\"\n Pushes records onto the top of the call stack\n\n Args:\n activation_record (ActivationRecord): record to put on top of the stack\n \"\"\"\n self._records.append(activation_record)\n\n def pop(self) -> ActivationRecord:\n \"\"\"\n Pops the latest record of the call stack and returns it\n\n Returns:\n ActivationRecord: latest activation record on the stack\n \"\"\"\n return self._records.pop()\n\n def peek(self) -> ActivationRecord:\n \"\"\"\n returns the latest record of the call stack\n\n Returns:\n ActivationRecord: latest activation record on the stack\n \"\"\"\n return self._records[-1]\n\n def down_stack(self, name: str) -> ActivationRecord:\n \"\"\"\n Searches the stack for an activation record containing the name\n \"\"\"\n for record in reversed(self._records):\n if name in record.members.keys():\n return record\n raise KeyError(f\"Could not find {name} in call stack\")\n\n def get(self, name: str):\n return self.down_stack(name)[name]\n\n @property\n def nesting_level(self):\n return self.peek().nesting_level\n\n def __str__(self):\n string = \"\\n\".join(repr(actr) for actr in reversed(self._records))\n string = f\"CALL STACK\\n{string}\\n\"\n return string\n\n def __repr__(self):\n return self.__str__()" }, { "identifier": "Error", "path": "shipyard/compiler_error.py", "snippet": "class Error(Exception):\n \"\"\"Base Error Class for shipyard\"\"\"\n\n def __init__(self, error_code=None, message=None):\n self.error_code = error_code\n # self.token = token\n # add exception class name before the message\n class_name = self.__class__.__name__.rsplit(\".\", maxsplit=1)[-1]\n self.message = f\"{class_name}: ({self.error_code}) {message}\"\n super().__init__(self.message)" }, { "identifier": "ErrorCode", "path": "shipyard/compiler_error.py", "snippet": "class ErrorCode(Enum):\n \"\"\"Class to enumerate error codes of the shipyard\"\"\"\n\n ID_NOT_FOUND = \"Identifier not found\"\n DUPLICATE_ID = \"Duplicate id found\"\n NOT_IN_GLOBAL_SCOPE = \"Not in global scope\"\n INVALID_DEFCAL_ARGUMENT = \"Invalid defcal argument\"\n EXPRESSION_IN_DEFCAL 
= \"Expression in defcal signature, unhandled\"\n INVALID_GATECALL_ARGUMENT = \"Invalid gatecall argument\"\n UNHANDLED = \"Unhandled case\"\n UNDETERMINED_CALL = \"Unable to determine a unique function for function call\"\n NO_SEQC_STATEMENT = \"No equivalent SEQC statement\"\n COMPILE_OUT = \"Statement should be compiled out before printing SEQC code\"\n PORT_NOT_FOUND = \"Port was not found within setup\"\n INSTRUMENT_NOT_FOUND = \"Instrument was not found within setup\"\n INPUT_NOT_FOUND = \"Input value was not found\"\n OUTPUT_NOT_SUPPORTED = \"Output type not supported\"\n INPUT_TYPE_NOT_SUPPORTED = \"Input type not supported\"\n INVALID_ARGUMENT = \"Invalid argument\"\n INVALID_WAVEFORM = \"Waveform does not meet timing constraints\"\n INCLUDE_ERROR = \"Error in include statement\"" }, { "identifier": "SemanticError", "path": "shipyard/compiler_error.py", "snippet": "class SemanticError(Error):\n \"\"\"Error class for semantic errors, raised by SemanticAnalyser\"\"\"" }, { "identifier": "LOGGER", "path": "shipyard/logger.py", "snippet": "LOGGER = logging.getLogger(\"Compiler\")" }, { "identifier": "Mangler", "path": "shipyard/mangle.py", "snippet": "class Mangler(LiteralVisitor, TypeVisitor, GenericVisitor):\n \"\"\"\n QASMVisitor that visits CalibrationDefinition or QuantumGate nodes to gather\n the iformation required to mangle function definition signatures and function calls\n \"\"\"\n\n def __init__(\n self, node: ast.CalibrationDefinition | ast.QuantumGate = None\n ) -> None:\n super().__init__()\n self.name = None\n self.qubits = None\n self.arguments = None\n self.return_type = None\n if node:\n self.visit(node)\n\n def signature(self) -> FunctionSignature:\n \"\"\"Converts instances of Mangler class to FunctionSignature objects\n\n Returns:\n FunctionSignature:\n with name, params qubits and return_type from the Mangler class instance\n \"\"\"\n return FunctionSignature(\n name=self.name,\n params=self.arguments,\n qubits=self.qubits,\n return_type=self.return_type,\n )\n\n # pylint: disable=C0103\n # disable snake_case naming style\n # these functions are of the form \"visit_{QASMNode class name}\"\n def visit_CalibrationDefinition(self, node: ast.CalibrationDefinition):\n \"\"\"\n CalibrationDefinition node visitor\n Extracts name, arguments, qubits and return_type from the node\n and makes them usable for mangling\n\n Args:\n node (ast.CalibrationDefinition):\n openQASM defcal statement to visit\n \"\"\"\n self.name = self.visit(node.name)\n self.arguments = [self.visit(arg) for arg in node.arguments]\n self.qubits = [self.visit(qubit) for qubit in node.qubits]\n self.return_type = self.visit(node.return_type) if node.return_type else \"\"\n\n def visit_QuantumGate(self, node: ast.QuantumGate):\n \"\"\"\n QuantumGate node visitor\n Extracts name, arguments, qubits and return_type from the node\n and makes them usable for mangling\n\n Args:\n node (ast.QuantumGate):\n openQASM quantum gate call node to visit\n \"\"\"\n self.name = self.visit(node.name)\n self.arguments = [self.visit(arg) for arg in node.arguments]\n self.qubits = [self.visit(qubit) for qubit in node.qubits]\n self.return_type = \"\"\n\n def visit_QuantumReset(self, node: ast.QuantumReset):\n \"\"\"\n QuantumReset node visitor\n Extracts qubits from the node.\n To be usable for mangling the following operations are performed\n name is set to \"reset\"\n arguments are set to empty ([])\n return_type is set to empty string (\"\")\n\n Args:\n node (ast.QuantumReset):\n openQASM quantum reset node to visit\n 
\"\"\"\n match node:\n case ast.QuantumReset(ast.Identifier(q)):\n self.name = \"reset\"\n self.arguments = []\n self.qubits = [q]\n self.return_type = \"\"\n case ast.QuantumReset(ast.IndexedIdentifier()):\n raise NotImplementedError\n case _:\n raise NotImplementedError # this should not happen on correct trees\n\n def visit_QuantumMeasurement(self, node: ast.QuantumMeasurement):\n \"\"\"\n QuantumMeasurement node visitor\n Extracts qubits from the node.\n To be usable for mangling the following operations are performed\n name is set to \"measure\"\n arguments are set to empty ([])\n return_type is set \"BIT\"\n\n Args:\n node (ast.QuantumMeasurement):\n openQASM quantum measurement node to visit\n \"\"\"\n match node:\n case ast.QuantumMeasurement(ast.Identifier(q)):\n self.name = \"measure\"\n self.arguments = []\n self.qubits = [q]\n self.return_type = \"BIT\"\n case ast.QuantumMeasurement(ast.IndexedIdentifier()):\n raise NotImplementedError\n case _:\n raise NotImplementedError # this should not happen on correct trees\n\n def visit_Identifier(self, node: ast.Identifier) -> str:\n \"\"\"\n Identifier node visitor\n\n Args:\n node (ast.Identifier):\n openQASM identifier node to visit\n\n Returns:\n str: the name of the identifier\n \"\"\"\n return node.name\n\n def visit_ClassicalArgument(self, node: ast.ClassicalArgument) -> str:\n \"\"\"\n ClassicalArgument node visitor\n\n Args:\n node (ast.ClassicalArgument):\n openQASM classical argument node to visit\n\n Returns:\n str: the type of the classical argument\n \"\"\"\n return self.visit(node.type)\n\n # pylint: enable=C0103" }, { "identifier": "Frame", "path": "shipyard/setup/internal.py", "snippet": "class Frame(BaseModel):\n \"\"\"\n Representation of the openQASM openpulse frame concept as a pydantic model.\n https://openqasm.com/language/openpulse.html#frames\n\n Args:\n name (str):\n name of the frame.\n port (Port):\n the Port object the frame is associated with.\n frequency (float):\n the frequency the frame evolves at. 
Defaults to 0.\n phase (float):\n the phase of the frame.\n time (Duration):\n the time of the frame.\n \"\"\"\n\n name: str\n port: Port\n frequency: float = 0.0\n phase: float = 0.0\n time: Duration = Duration(time=0)\n\n def set_phase(self, phase: float):\n \"\"\"Sets the phase of the frame\n\n Args:\n phase (float): the value the phase will be set to\n \"\"\"\n self.phase = phase\n\n def shift_phase(self, phase: float):\n \"\"\"Shifts the phase of the frame\n\n Args:\n phase (float): the value the phase will be shifted by.\n \"\"\"\n self.phase += phase\n\n def get_phase(self) -> float:\n \"\"\"Gets the phase of the frame\n\n Returns:\n float: current value of the phase of the frame.\n \"\"\"\n return self.phase\n\n def set_frequency(self, frequency: float):\n \"\"\"Sets the frequency of the frame\n\n Args:\n frequency (float): the value the frequency will be set to.\n \"\"\"\n self.frequency = frequency\n\n def shift_frequency(self, frequency: float):\n \"\"\"Shifts the frequency of the frame\n\n Args:\n frequency (float): the value the frequency will be shifted by.\n \"\"\"\n self.frequency += frequency\n\n def get_frequency(self) -> float:\n \"\"\"Gets the frequency of the frame\n\n Returns:\n float: current value of the frequency of the frame.\n \"\"\"\n return self.frequency\n\n def advance(self, duration: Duration):\n \"\"\"Advances the time of the frame by some duration\n\n Args:\n duration (Duration): the duration to advance the time of the frame by.\n \"\"\"\n self.time += duration\n\n def advance_to(self, duration: Duration):\n \"\"\"Advances the time of the frame to some other time\n\n Args:\n duration (Duration): the duratioin to advance the time fo the frame to.\n\n Raises:\n ValueError:\n If the time the frame should be advanced to is less than the\n current time of the frame.\n \"\"\"\n duration.set_unit(self.time.unit)\n if self.time > duration:\n raise ValueError(f\"Cant advance current time {self.time} to {duration}\")\n self.time.time = int(duration.time * duration.unit.value / self.time.unit.value)" }, { "identifier": "SetupInternal", "path": "shipyard/setup/internal.py", "snippet": "class SetupInternal(BaseModel):\n\n \"\"\"\n A Pydantic model containing the information required to compile an openQASM program\n to instrument level instructions.\n\n It is recommended to instanciate this object from a configuration file\n (json (future yml?))\n \"\"\"\n\n # todo validation\n\n # todo move to own module\n instruments: dict[str, Instrument]\n ports: dict[str, Port]\n frames: dict[str, Frame]\n\n @classmethod\n def from_dict(cls, setup: dict[str, dict[str, dict]]) -> \"SetupInternal\":\n \"\"\"Creates a Setup object from a dictionary\n\n Args:\n setup (dict[str, dict[str, dict]]): dictionary to create a Setup object from\n\n Returns:\n Setup: created from dictionary\n \"\"\"\n instruments = {\n k: Instrument(name=k, **v) for k, v in setup[\"Instruments\"].items()\n }\n ports = {}\n for k, val in setup[\"Ports\"].items():\n val[\"instrument\"] = instruments[val[\"instrument\"]]\n val[\"core\"] = Port.Core(**val[\"core\"])\n ports[k] = Port(name=k, **val)\n frames = {}\n for k, val in setup[\"Frames\"].items():\n val[\"port\"] = ports[val[\"port\"]]\n frames[k] = Frame(name=k, **val)\n return cls(instruments=instruments, ports=ports, frames=frames)\n\n def to_dict(self) -> dict[str, dict[str, dict]]:\n \"\"\"Creates a dictionary from a Setup object\n\n Args:\n filename (Path | str, optional):\n path to save dictionary to. 
Defaults to None.\n\n Returns:\n dict[str, dict[str, dict]]: dictionary created from Setup object\n \"\"\"\n setup = {\n \"Instruments\": {\n k: {\n \"type\": v.type,\n \"serial\": v.serial,\n }\n for k, v in self.instruments.items()\n },\n \"Ports\": {\n k: {\n \"instrument\": v.instrument.name,\n \"core\": {\n \"type\": v.core.type.value,\n \"index\": v.core.index,\n \"channels\": v.core.channels,\n },\n }\n for k, v in self.ports.items()\n },\n \"Frames\": {\n k: {\n \"port\": v.port.name,\n \"frequency\": v.frequency,\n \"phase\": v.phase,\n }\n for k, v in self.frames.items()\n },\n }\n return setup\n\n @classmethod\n def from_json(cls, filename: str | Path) -> \"SetupInternal\":\n \"\"\"Creates a Setup object from a json file\n\n Args:\n filename (str | Path): path to json file\n\n Returns:\n Setup: created from json file\n \"\"\"\n with open(filename, encoding=\"utf-8\") as file:\n data = json.load(file)\n return cls.from_dict(data)\n\n def to_json(self, filename: str | Path) -> Path:\n \"\"\"Writes a Setup object to a json file\n\n Args:\n filename (str | Path): path to json file to create\n\n Returns:\n Path: path to json file\n \"\"\"\n data = self.to_dict()\n with open(filename, \"w\", encoding=\"utf-8\") as file:\n json.dump(data, file, indent=4)\n return Path(filename)\n\n @classmethod\n def from_yml(cls, filename: str | Path) -> \"SetupInternal\":\n \"\"\"Creates a Setup object from a yml file\n\n Args:\n filename (str | Path): path to yml file\n\n Returns:\n Setup: created from yml file\n \"\"\"\n with open(filename, \"r\", encoding=\"utf-8\") as file:\n data = yaml.safe_load(file)\n return cls.from_dict(data)\n\n def to_yml(self, filename: str | Path) -> Path:\n \"\"\"Writes a Setup object to a yml file\n\n Args:\n filename (str | Path): path to yml file to create\n\n Returns:\n Path: path to yml file\n \"\"\"\n data = self.to_dict()\n with open(filename, \"w\", encoding=\"utf-8\") as file:\n yaml.dump(data, file)\n return Path(filename)\n\n def cores(self) -> set[tuple[str, int, str]]:\n \"\"\"Gets all the AWG Cores used in the setup\n\n Returns:\n set[tuple[str, int, str]]:\n a Set of tuples, each tuple has a string representing the instruement\n name, a integer representing the index of the awg core of the\n instrument and a string representing the type of the awg core.\n \"\"\"\n return set(\n (port.instrument.name, port.core.index, port.core.type.value)\n for port in self.ports.values()\n )" }, { "identifier": "GenericVisitor", "path": "shipyard/visitors/generic_visitor.py", "snippet": "class GenericVisitor(QASMVisitor):\n def _visit_list(\n self, nodes: list[ast.QASMNode], visit_function: callable, context=None\n ):\n [visit_function(node) for node in nodes]\n\n def visit_Program(self, node: ast.Program, context=None):\n \"\"\"\n An entire OpenQASM 3 program represented by a list of top level statements\n \"\"\"\n self._visit_list(node.statements, self.visit)\n\n def visit_Annotation(self, node: ast.Annotation, context=None):\n \"\"\"An annotation applied to a statment.\"\"\"\n\n def visit_Statement(self, node: ast.Statement, context=None):\n \"\"\"A statement: anything that can appear on its own line\"\"\"\n self._visit_list(node.annotations, self.visit)\n\n def visit_Include(\n self, node: ast.Include, context=None\n ) -> ast.Include | list[ast.Statement]:\n \"\"\"\n An include statement\n \"\"\"\n self.visit_Statement(node)\n\n def visit_ExpressionStatement(self, node: ast.ExpressionStatement, context=None):\n \"\"\"A statement that contains a single 
expression\"\"\"\n self.visit_Statement(node)\n self.visit(node.expression)\n\n # Note that QubitDeclaration is not a valid QuantumStatement, because qubits\n # can only be declared in global scopes, not in gates.\n def visit_QubitDeclaration(self, node: ast.QubitDeclaration, context=None):\n \"\"\"\n Global qubit declaration\n\n Example::\n\n qubit q;\n qubit[4] q;\n\n q // <- qubit\n 4 // <- size\n\n \"\"\"\n self.visit_Statement(node)\n self.visit_Identifier(node.qubit)\n if node.size:\n self.visit(node.size)\n\n def visit_QuantumGateDefinition(\n self, node: ast.QuantumGateDefinition, context=None\n ):\n \"\"\"\n Define a new quantum gate\n\n Example::\n\n gate cx c, t {\n ctrl @ unitary(pi, 0, pi) c, t;\n }\n\n \"\"\"\n self.visit_Statement(node)\n self.visit_Identifier(node.name)\n self._visit_list(node.arguments, self.visit_Identifier)\n self._visit_list(node.qubits, self.visit_Identifier)\n self._visit_list(node.body, self.visit)\n\n def visit_QuantumStatement(self, node: ast.QuantumStatement, context=None):\n \"\"\"Statements that may appear inside a gate declaration\"\"\"\n self.visit_Statement(node)\n\n def visit_ExternDeclaration(self, node: ast.ExternDeclaration, context=None):\n \"\"\"\n A extern declaration\n\n Example::\n\n extern get_pauli(int[prec], context=None) -> bit[2 * n];\n\n get_pauli // <- name\n int[prec] // <- classical type\n bit[2 * n] // <- return type\n\n \"\"\"\n self.visit_Statement(node)\n self.visit_Identifier(node.name)\n self._visit_list(node.arguments, self.visit)\n if node.return_type:\n self.visit(node.return_type)\n\n def visit_Expression(self, node: ast.Expression, context=None):\n \"\"\"An expression: anything that returns a value\"\"\"\n\n def visit_Identifier(self, node: ast.Identifier, context=None):\n \"\"\"\n An identifier\n\n Example::\n\n q1\n\n \"\"\"\n self.visit_Expression(node)\n\n def visit_UnaryExpression(self, node: ast.UnaryExpression, context=None):\n \"\"\"\n A unary expression\n\n Example::\n\n ~b\n !bool\n -i\n\n \"\"\"\n self.visit_Expression(node)\n\n def visit_BinaryExpression(self, node: ast.BinaryExpression, context=None):\n \"\"\"\n A binary expression\n\n Example::\n\n q1 || q2\n\n \"\"\"\n self.visit_Expression(node)\n self.visit(node.lhs)\n self.visit(node.rhs)\n\n def visit_IntegerLiteral(self, node: ast.IntegerLiteral, context=None):\n \"\"\"\n An integer literal\n\n Example::\n\n 1\n\n \"\"\"\n self.visit_Expression(node)\n\n def visit_FloatLiteral(self, node: ast.FloatLiteral, context=None):\n \"\"\"\n An real number literal\n\n Example::\n\n 1.1\n\n \"\"\"\n self.visit_Expression(node)\n\n def visit_ImaginaryLiteral(self, node: ast.ImaginaryLiteral, context=None):\n \"\"\"\n An real number literal\n\n Example::\n\n 1.1im\n\n \"\"\"\n self.visit_Expression(node)\n\n def visit_BooleanLiteral(self, node: ast.BooleanLiteral, context=None):\n \"\"\"\n A boolean expression\n\n Example::\n\n true\n false\n\n \"\"\"\n self.visit_Expression(node)\n\n def visit_BitstringLiteral(self, node: ast.BitstringLiteral, context=None):\n \"\"\"A literal bitstring value. 
The ``value`` is the numerical value of the\n bitstring, and the ``width`` is the number of digits given.\"\"\"\n self.visit_Expression(node)\n\n def visit_DurationLiteral(self, node: ast.DurationLiteral, context=None):\n \"\"\"\n A duration literal\n\n Example::\n\n 1.0ns\n\n \"\"\"\n self.visit_Expression(node)\n\n def visit_ArrayLiteral(self, node: ast.ArrayLiteral, context=None):\n \"\"\"Array literal, used to initialise declared arrays.\n\n For example::\n\n array[uint[8], 2] row{1, 2};\n array[uint[8], 2, 2] my_array{{1, 2}, {3, 4}};\n array[uint[8], 2, 2] my_array{row, row};\n \"\"\"\n self.visit_Expression(node)\n self._visit_list(node.values, self.visit)\n\n def visit_FunctionCall(self, node: ast.FunctionCall, context=None):\n \"\"\"\n A function call expression\n\n Example::\n\n foo(1)\n\n foo // <- name\n\n \"\"\"\n self.visit_Expression(node)\n self.visit_Identifier(node.name)\n self._visit_list(node.arguments, self.visit)\n\n def visit_Cast(self, node: ast.Cast, context=None):\n \"\"\"\n A cast call expression\n\n Example::\n\n counts += int[1](b);\n\n \"\"\"\n self.visit_Expression(node)\n self.visit(node.type)\n self.visit(node.argument)\n\n def visit_DiscreteSet(self, node: ast.DiscreteSet, context=None):\n \"\"\"\n A set of discrete values. This can be used for the values in a ``for``\n loop, or to index certain values out of a register::\n\n for i in {1, 2, 3} {}\n let aliasqubits[{2, 3, 4}];\n \"\"\"\n self._visit_list(node.values, self.visit)\n\n def visit_RangeDefinition(self, node: ast.RangeDefinition, context=None):\n \"\"\"\n Range definition.\n\n Example::\n\n 1:2\n 1:1:10\n :\n \"\"\"\n if node.start:\n self.visit(node.start)\n if node.end:\n self.visit(node.end)\n if node.step:\n self.visit(node.step)\n\n IndexElement = ast.DiscreteSet | list[ast.Expression | ast.RangeDefinition]\n\n def _visit_IndexElement(self, node: IndexElement, context=None):\n if isinstance(node, list):\n return self._visit_list(node, self.visit)\n return self.visit(node)\n\n def visit_IndexExpression(self, node: ast.IndexExpression, context=None):\n \"\"\"\n An index expression.\n\n Example::\n\n q[1]\n \"\"\"\n self.visit_Expression(node)\n self.visit(node.collection)\n self._visit_IndexElement(node.index)\n\n def visit_IndexedIdentifier(self, node: ast.IndexedIdentifier, context=None):\n \"\"\"An indentifier with index operators, such that it can be used as an\n lvalue. The list of indices is subsequent index brackets, so in::\n\n a[{1, 2, 3}][0:1, 0:1]\n\n the list of indices will have two elements. 
The first will be a\n :class:`.DiscreteSet`, and the second will be a list of two\n :class:`.RangeDefinition`\\\\ s.\n \"\"\"\n self.visit_Identifier(node.name)\n self._visit_list(node.indices, self._visit_IndexElement)\n\n def visit_Concatenation(self, node: ast.Concatenation, context=None):\n \"\"\"\n Concatenation of two registers, for example::\n\n a ++ b\n a[2:3] ++ a[0:1]\n \"\"\"\n self.visit_Expression(node)\n self.visit(node.lhs)\n self.visit(node.rhs)\n\n def visit_QuantumGate(self, node: ast.QuantumGate, context=None):\n \"\"\"\n Invoking a quantum gate\n\n Example::\n cx[dur] 0, 1;\n\n or\n\n ctrl @ p(λ) a, b;\n\n ctrl @ // <- quantumGateModifier\n p // <- quantumGateName\n λ // <- argument\n a, b // <- qubit\n \"\"\"\n self.visit_QuantumStatement(node)\n self._visit_list(node.modifiers, self.visit_QuantumGateModifier)\n self.visit_Identifier(node.name)\n self._visit_list(node.arguments, self.visit)\n self._visit_list(node.qubits, self.visit)\n if node.duration:\n self.visit(node.duration)\n\n def visit_QuantumGateModifier(self, node: ast.QuantumGateModifier, context=None):\n \"\"\"\n A quantum gate modifier\n\n Attributes:\n modifier: 'inv', 'pow', or 'ctrl'\n expression: only pow modifier has expression.\n\n Example::\n\n inv @\n pow(1/2)\n ctrl\n \"\"\"\n if node.argument:\n self.visit(node.argument)\n\n def visit_QuantumPhase(self, node: ast.QuantumPhase, context=None):\n \"\"\"\n A quantum phase instruction\n\n Example::\n\n ctrl @ gphase(λ) a;\n\n ctrl @ // <- quantumGateModifier\n λ // <- argument\n a // <- qubit\n\n \"\"\"\n self.visit_QuantumStatement(node)\n self._visit_list(node.modifiers, self.visit_QuantumGateModifier)\n self.visit(node.argument)\n self._visit_list(node.qubits, self.visit)\n\n # Not a full expression because it can only be used in limited contexts.\n def visit_QuantumMeasurement(self, node: ast.QuantumMeasurement, context=None):\n \"\"\"\n A quantum measurement instruction\n\n Example::\n\n measure q;\n \"\"\"\n self.visit(node.qubit)\n\n # Note that this is not a QuantumStatement because it involves access to\n # classical bits.\n def visit_QuantumMeasurementStatement(\n self, node: ast.QuantumMeasurementStatement, context=None\n ):\n \"\"\"Stand-alone statement of a quantum measurement, potentially assigning the\n result to a classical variable. 
This is not the only statement that\n `measure` can appear in (it can also be in classical declaration statements\n and returns).\"\"\"\n self.visit_Statement(node)\n self.visit_QuantumMeasurement(node.measure)\n if node.target:\n self.visit(node.target)\n\n def visit_QuantumBarrier(self, node: ast.QuantumBarrier, context=None):\n \"\"\"\n A quantum barrier instruction\n\n Example::\n\n barrier q;\n \"\"\"\n self.visit_QuantumStatement(node)\n self._visit_list(node.qubits, self.visit)\n\n def visit_QuantumReset(self, node: ast.QuantumReset, context=None):\n \"\"\"\n A reset instruction.\n\n Example::\n\n reset q;\n \"\"\"\n\n self.visit_QuantumStatement(node)\n self.visit(node.qubits)\n\n def visit_ClassicalArgument(self, node: ast.ClassicalArgument, context=None):\n \"\"\"\n Classical argument for a gate or subroutine declaration\n \"\"\"\n self.visit(node.type)\n self.visit_Identifier(node.name)\n\n def visit_ExternArgument(self, node: ast.ExternArgument, context=None):\n \"\"\"Classical argument for an extern declaration.\"\"\"\n\n self.visit(node.type)\n\n def visit_ClassicalDeclaration(self, node: ast.ClassicalDeclaration, context=None):\n \"\"\"\n Classical variable declaration\n\n Example::\n\n bit c;\n \"\"\"\n\n self.visit_Statement(node)\n self.visit(node.type)\n self.visit_Identifier(node.identifier)\n if node.init_expression:\n self.visit(node.init_expression)\n\n def visit_IODeclaration(self, node: ast.IODeclaration, context=None):\n \"\"\"\n Input/output variable declaration\n\n Exampe::\n\n input angle[16] theta;\n output bit select;\n \"\"\"\n self.visit_Statement(node)\n self.visit(node.type)\n self.visit_Identifier(node.identifier)\n\n def visit_ConstantDeclaration(self, node: ast.ConstantDeclaration, context=None):\n \"\"\"\n Constant declaration\n\n Example::\n\n const int[16] n10;\n \"\"\"\n self.visit_Statement(node)\n self.visit(node.type)\n self.visit_Identifier(node.identifier)\n self.visit(node.init_expression)\n\n def visit_ClassicalType(self, node: ast.ClassicalType, context=None):\n \"\"\"\n Base class for classical type\n \"\"\"\n\n def visit_IntType(self, node: ast.IntType, context=None):\n \"\"\"\n Node representing a classical ``int`` (signed integer) type, with an\n optional precision.\n\n Example:\n\n int[8]\n int[16]\n \"\"\"\n self.visit_ClassicalType(node)\n if node.size:\n self.visit(node.size)\n\n def visit_UintType(self, node: ast.UintType, context=None):\n \"\"\"\n Node representing a classical ``uint`` (unsigned integer) type, with an\n optional precision.\n\n Example:\n\n uint[8]\n uint[16]\n \"\"\"\n\n self.visit_ClassicalType(node)\n if node.size:\n self.visit(node.size)\n\n def visit_FloatType(self, node: ast.FloatType, context=None):\n \"\"\"\n Node representing the classical ``float`` type, with the particular IEEE-754\n floating-point size optionally specified.\n\n Example:\n\n float[16]\n float[64]\n \"\"\"\n self.visit_ClassicalType(node)\n if node.size:\n self.visit(node.size)\n\n def visit_ComplexType(self, node: ast.ComplexType, context=None):\n \"\"\"\n Complex ClassicalType. 
Its real and imaginary parts are based on other\n classical types.\n\n Example::\n\n complex[float]\n complex[float[32]]\n \"\"\"\n self.visit_ClassicalType(node)\n if node.base_type:\n self.visit(node.base_type)\n\n def visit_AngleType(self, node: ast.AngleType, context=None):\n \"\"\"\n Node representing the classical ``angle`` type, with an optional precision.\n\n Example::\n\n angle[8]\n angle[16]\n \"\"\"\n self.visit_ClassicalType(node)\n if node.size:\n self.visit(node.size)\n\n def visit_BitType(self, node: ast.BitType, context=None):\n \"\"\"\n Node representing the classical ``bit`` type, with an optional size.\n\n Example::\n\n bit[8]\n creg[8]\n \"\"\"\n self.visit_ClassicalType(node)\n if node.size:\n self.visit(node.size)\n\n def visit_BoolType(self, node: ast.BoolType, context=None):\n \"\"\"\n Leaf node representing the Boolean classical type.\n \"\"\"\n self.visit_ClassicalType(node)\n\n def visit_ArrayType(self, node: ast.ArrayType, context=None):\n \"\"\"Type of arrays that include allocation of the storage.\n\n This is generally any array declared as a standard statement, but not\n arrays declared by being arguments to subroutines.\n \"\"\"\n self.visit_ClassicalType(node)\n self.visit(node.base_type)\n self._visit_list(node.dimensions, self.visit)\n\n def visit_ArrayReferenceType(self, node: ast.ArrayReferenceType, context=None):\n \"\"\"Type of arrays that are a reference to an array with allocated storage.\n\n This is generally any array declared as a subroutine argument. The\n dimensions can be either a list of expressions (one for each dimension), or\n a single expression, which is the number of dimensions.\n\n For example::\n\n // `a` will have dimensions `[IntegerLiteral(2)]` (with a list), because\n // it is a 1D array, with a length of 2.\n def f(const array[uint[8], 2] a) {}\n // `b` will have dimension `IntegerLiteral(3)` (no list), because it is\n // a 3D array, but we don't know the lengths of its dimensions.\n def f(const array[uint[8], #dim=3] b) {}\n \"\"\"\n\n self.visit_ClassicalType(node)\n self.visit(node.base_type)\n if isinstance(node.dimensions, list):\n self._visit_list(node.dimensions, self.visit)\n else:\n self.visit(node.dimensions)\n\n def visit_DurationType(self, node: ast.DurationType, context=None):\n \"\"\"\n Leaf node representing the ``duration`` type.\n \"\"\"\n self.visit_ClassicalType(node)\n\n def visit_StretchType(self, node: ast.StretchType, context=None) -> ast.StretchType:\n \"\"\"\n Leaf node representing the ``stretch`` type.\n \"\"\"\n self.visit_ClassicalType(node)\n\n def visit_CalibrationGrammarDeclaration(\n self, node: ast.CalibrationGrammarDeclaration, context=None\n ):\n \"\"\"\n Calibration grammar declaration\n\n Example::\n\n defcalgrammar \"openpulse\";\n \"\"\"\n\n def visit_CalibrationStatement(self, node: ast.CalibrationStatement, context=None):\n \"\"\"An inline ``cal`` statement for embedded pulse-grammar interactions.\n\n Example::\n\n cal {\n shift_phase(drive($0), theta);\n }\n \"\"\"\n self.visit_Statement(node)\n self._visit_list(node.body, self.visit)\n\n def visit_CalibrationBlock(self, node: ast.CalibrationBlock, context=None):\n self._visit_list(node.body, self.visit)\n\n def visit_CalibrationDefinition(\n self, node: ast.CalibrationDefinition, context=None\n ):\n \"\"\"\n Calibration definition\n\n Example::\n\n defcal rz(angle[20] theta) q {\n shift_phase drive(q), -theta;\n }\n \"\"\"\n self.visit_Statement(node)\n self.visit_Identifier(node.name)\n self._visit_list(node.arguments, self.visit)\n 
self._visit_list(node.qubits, self.visit_Identifier)\n self._visit_list(node.body, self.visit)\n if node.return_type:\n self.visit(node.return_type)\n\n def visit_SubroutineDefinition(self, node: ast.SubroutineDefinition, context=None):\n \"\"\"\n Subroutine definition\n\n Example::\n\n def measure(qubit q, context=None) -> bit {\n s q;\n h q;\n return measure q;\n }\n \"\"\"\n self.visit_Statement(node)\n self.visit_Identifier(node.name)\n self._visit_list(node.arguments, self.visit)\n self._visit_list(node.body, self.visit)\n if node.return_type:\n self.visit(node.return_type)\n\n def visit_QuantumArgument(self, node: ast.QuantumArgument, context=None):\n \"\"\"\n Quantum argument for a subroutine declaration\n \"\"\"\n self.visit_Identifier(node.name)\n if node.size:\n self.visit(node.size)\n\n def visit_ReturnStatement(self, node: ast.ReturnStatement, context=None):\n \"\"\"\n Classical or quantum return statement\n\n Example::\n\n return measure q;\n\n return a + b\n\n \"\"\"\n self.visit_Statement(node)\n if node.expression:\n self.visit(node.expression)\n\n def visit_BreakStatement(self, node: ast.BreakStatement, context=None):\n \"\"\"\n Break statement\n\n Example::\n\n break;\n \"\"\"\n self.visit_Statement(node)\n\n def visit_ContinueStatement(self, node: ast.ContinueStatement, context=None):\n \"\"\"\n Continue statement\n\n Example::\n\n continue;\n \"\"\"\n self.visit_Statement(node)\n\n def visit_EndStatement(self, node: ast.EndStatement, context=None):\n \"\"\"\n End statement\n\n Example::\n\n end;\n \"\"\"\n self.visit_Statement(node)\n\n def visit_BranchingStatement(self, node: ast.BranchingStatement, context=None):\n \"\"\"\n Branch (``if``) statement\n\n Example::\n\n if (temp == 1) {\n ry(-pi / 2) scratch[0];\n } else continue;\n \"\"\"\n self.visit_Statement(node)\n self.visit(node.condition)\n self._visit_list(node.if_block, self.visit)\n self._visit_list(node.else_block, self.visit)\n\n def visit_WhileLoop(self, node: ast.WhileLoop, context=None):\n \"\"\"\n While loop\n\n Example::\n\n while(~success) {\n reset magic;\n ry(pi / 4) magic;\n successdistill(magic, scratch);\n }\n \"\"\"\n self.visit_Statement(node)\n self.visit(node.while_condition)\n self._visit_list(node.block, self.visit)\n\n def visit_ForInLoop(self, node: ast.ForInLoop, context=None):\n \"\"\"\n For in loop\n\n Example::\n\n for i in [0: 2] {\n majority a[i], b[i + 1], a[i + 1];\n }\n \"\"\"\n self.visit_Statement(node)\n self.visit(node.type)\n self.visit_Identifier(node.identifier)\n self.visit(node.set_declaration)\n self._visit_list(node.block, self.visit)\n\n def visit_DelayInstruction(self, node: ast.DelayInstruction, context=None):\n \"\"\"\n Delay instruction\n\n Example::\n\n delay[start_stretch] $0;\n \"\"\"\n self.visit_QuantumStatement(node)\n self.visit(node.duration)\n self._visit_list(node.qubits, self.visit)\n\n def visit_Box(self, node: ast.Box, context=None):\n \"\"\"\n Timing box\n\n Example::\n\n box [maxdur] {\n delay[start_stretch] $0;\n x $0;\n }\n \"\"\"\n self.visit_QuantumStatement(node)\n if node.duration:\n self.visit(node.duration)\n self._visit_list(node.body, self.visit)\n\n def visit_DurationOf(self, node: ast.DurationOf, context=None):\n \"\"\"\n Duration Of\n\n Example::\n\n durationof({x $0;})\n \"\"\"\n self.visit_Expression(node)\n self._visit_list(node.target, self.visit)\n\n def visit_SizeOf(self, node: ast.SizeOf, context=None):\n \"\"\"``sizeof`` an array's dimensions.\"\"\"\n self.visit_Expression(node)\n self.visit(node.target)\n if node.index:\n 
self.visit(node.index)\n\n def visit_AliasStatement(self, node: ast.AliasStatement, context=None):\n \"\"\"\n Alias statement\n\n Example::\n\n let aqubits[0];\n\n \"\"\"\n self.visit_Statement(node)\n self.visit_Identifier(node.target)\n self.visit(node.value)\n\n def visit_ClassicalAssignment(self, node: ast.ClassicalAssignment, context=None):\n \"\"\"\n Classical assignment\n\n Example::\n\n a[0]1;\n \"\"\"\n self.visit_Statement(node)\n self.visit(node.lvalue)\n self.visit(node.rvalue)\n\n def visit_Pragma(self, node: ast.Pragma, context=None):\n \"\"\"\n Pragma\n Example::\n\n #pragma val1 val2 val3\n \"\"\"\n\n def visit_WaveformType(self, node: ast.WaveformType, context=None):\n self.visit_ClassicalType(node)\n\n def visit_PortType(self, node: ast.PortType, context=None):\n self.visit_ClassicalType(node)\n\n def visit_FrameType(self, node: ast.FrameType, context=None):\n self.visit_ClassicalType(node)" } ]
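The context snippets above include the complete `ActivationRecord` and `CallStack` classes from `shipyard/call_stack.py`. A minimal usage sketch, assuming the import path given in the snippet metadata, shows how the interpreter-style lookup via `down_stack` resolves a name from an enclosing record:

```python
from shipyard.call_stack import ActivationRecord, ARType, CallStack

stack = CallStack()

main = ActivationRecord(name="main", ar_type=ARType.PROGRAM, nesting_level=1)
stack.push(main)
stack.peek()["n_shots"] = 1024          # __setitem__ stores into record.members

loop = ActivationRecord(name="loop", ar_type=ARType.LOOP, nesting_level=2)
stack.push(loop)

# down_stack() searches from the innermost record outwards for the name,
# so a variable set in "main" is still visible inside the "loop" record.
assert stack.get("n_shots") == 1024
assert stack.nesting_level == 2

stack.pop()                              # leave the loop scope
```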
import functools
import operator

import numpy as np
from contextlib import contextmanager
from openpulse import ast

from ..call_stack import ActivationRecord, ARType, CallStack
from ..compiler_error import Error, ErrorCode, SemanticError
from ..logger import LOGGER
from ..mangle import Mangler
from ..setup.internal import Frame, SetupInternal
from ..visitors import GenericVisitor as QASMVisitor
12825
def visit_CalibrationDefinition(self, node: ast.CalibrationDefinition) -> None: """ CalibrationDefinition (defcal) node visitor: Saves defcal defintions to self.defcal_nodes dictionary with a mangled name. These mangled names are also saved to a list of defcal names (self.defcal_names) Args: node (ast.CalibrationDefinition): defcal node to visit """ mangled_name = Mangler(node).signature().mangle() self.defcal_names.append(mangled_name) self.defcal_nodes[mangled_name] = node @_maybe_annotated def visit_CalibrationStatement(self, node: ast.CalibrationStatement) -> None: """ CalibrationStatement node visitor: Evaluates each line in a calibration block. Updates the self.calibration_scope dictionary which maintains a dictionary of values/variables in calibration scope. Args: node (ast.CalibrationStatement): openQASM CalibrationStatement AST node """ curr_nesting = self.call_stack.peek().nesting_level outer_activation_record = ActivationRecord( name="outer_calibration", ar_type=ARType.CALIBRATION, nesting_level=curr_nesting + 1, ) outer_activation_record.members = self.calibration_scope with self.ar_context_manager(outer_activation_record): inner_activation_record = ActivationRecord( name="new_calibration", ar_type=ARType.CALIBRATION, nesting_level=curr_nesting + 2, ) with self.ar_context_manager(inner_activation_record): for statement in node.body: self.visit(statement) self.calibration_scope.update(self.call_stack.peek().members) def visit_QuantumArgument(self, node: ast.QuantumArgument) -> None: """Raises error""" self.visit(node.name) @_maybe_annotated def visit_BreakStatement(self, node: ast.BreakStatement) -> None: """Raises error""" raise NotImplementedError @_maybe_annotated def visit_ContinueStatement(self, node: ast.ContinueStatement) -> None: """Raises error""" raise NotImplementedError @_maybe_annotated def visit_EndStatement(self, node: ast.EndStatement) -> None: """Raises error""" raise NotImplementedError @_maybe_annotated def visit_WhileLoop(self, node: ast.WhileLoop) -> None: """ WhileLoop node visitor: Prints out a while loop in SEQC format (which happens to be identical to openQASM format) All the statements in the block of the while loop are visited Example: qasm: while (int i < 10) {...; i=i+1;} -> seqc: while (cvar i < 10) {...; i=i+1;} Args: node (ast.WhileLoop): openQASM WhileLoop AST node context (PrinterState): state of the printer (e.g. indentation) """ if not self.visit_loops: return activation_record = ActivationRecord( name="while loop", ar_type=ARType.LOOP, nesting_level=self.call_stack.nesting_level + 1, ) with self.ar_context_manager(activation_record): # todo break if while_condition is just True (i.e. infiinite loop) while self.visit(node.while_condition): for statement in node.block: self.visit(statement) @_maybe_annotated def visit_ForInLoop(self, node: ast.ForInLoop) -> None: """ ForInLoop node visitor: Evaluates iteration range of for loop and then evaluates the body of the for loop for each iteration. Args: node (ast.ForInLoop): openQASM ForInLoop AST node Raises: Error: ErrorCode.UNHANDLED If the SET iterated over by the ForInLoop is incorrectly defined or not created using a RangeDefinition """ if not self.visit_loops: return name = node.identifier.name activation_record = ActivationRecord( name=f"for_loop_{self.call_stack.nesting_level+1}", ar_type=ARType.LOOP, nesting_level=self.call_stack.nesting_level + 1, ) with self.ar_context_manager(activation_record): start, end, step = self.visit(node.set_declaration) if end is None:
# pylint: disable=C0302 # Too many lines in module """ Interpreter: Class for evaluating OpenQASM ASTs """ # pylint: disable=W0221,R0904 def _maybe_annotated(method): # pragma: no cover @functools.wraps(method) def annotated(self: "Interpreter", node: ast.Statement) -> None: for annotation in node.annotations: self.visit(annotation) return method(self, node) return annotated # redefine IndexElement as it is not accessible from the openqasm3.ast IndexElement = ast.DiscreteSet | list[ast.Expression | ast.RangeDefinition] class Interpreter(QASMVisitor): """AST-visitor for evaluating OpenQASM code. Class maintains a call stack of activation records, which hold variable/literals information. Also maintains record of external functions, subroutines, and quantum gates. If subclassing, generally only the specialised ``visit_*`` methods need to be overridden. These are derived from the base class, and use the name of the relevant :mod:`AST node <.ast>` verbatim after ``visit_``. Based on the openQASM3 Printer""" def __init__( self, setup: SetupInternal = None, external_funcs: dict = None, visit_loops: bool = True, ): self.call_stack = CallStack() self.setup = setup self.external_funcs = external_funcs self.calibration_scope = {} self.defcal_nodes = {} self.defcal_names = [] self.subroutines = {} self.visit_loops = visit_loops def visit_Program(self, node: ast.Program) -> None: activation_record = ActivationRecord( name="main", ar_type=ARType.PROGRAM, nesting_level=1 ) with self.ar_context_manager(activation_record): for statement in node.statements: self.visit(statement) @_maybe_annotated def visit_Include(self, node: ast.Include) -> None: """Include statements should be resolved at this point""" raise self.compile_out(node) @_maybe_annotated def visit_QubitDeclaration(self, node: ast.QubitDeclaration) -> None: """Qubit declarations not supported""" activation_record = self.call_stack.peek() if node.size is not None: size = self.visit(node.size) activation_record[node.qubit.name] = [f"${x}" for x in range(size)] def visit_SubroutineDefinition(self, node: ast.SubroutineDefinition) -> None: """Add subroutine to subroutines dict""" self.subroutines[node.name.name] = node @_maybe_annotated def visit_QuantumGateDefinition(self, node: ast.QuantumGateDefinition) -> None: """Not supporting quantum gate definitions""" raise self.compile_out(node) @_maybe_annotated def visit_ExternDeclaration(self, node: ast.ExternDeclaration) -> None: """Pass over extern declarations""" def visit_Identifier(self, node: ast.Identifier) -> None: """Return the value associated with a given identifier""" try: activation_record = self.call_stack.down_stack(node.name) return activation_record[node.name] except KeyError as exc: raise SemanticError( ErrorCode.ID_NOT_FOUND, f"Identifier: {node.name} not found in call stack", ) from exc def visit_BooleanLiteral(self, node: ast.BooleanLiteral) -> bool: """Return the value of a boolean literal""" return node.value def visit_BinaryExpression(self, node: ast.BinaryExpression) -> None: """Evaluate and return the binary expression""" left = self.visit(node.lhs) right = self.visit(node.rhs) op = node.op return binary_ops[op.value](left, right) def visit_UnaryExpression(self, node: ast.UnaryExpression) -> None: """Evaluate and return the unary expression""" op = node.op return unary_ops[op.value](self.visit(node.expression)) def visit_FloatLiteral(self, node: ast.FloatLiteral) -> None: """Return the value of a float literal""" return node.value def visit_ImaginaryLiteral(self, node: 
ast.ImaginaryLiteral) -> None: """Return the value of an imaginary literal""" return complex(0, node.value) def visit_DurationLiteral(self, node: ast.DurationLiteral) -> None: """Return the value of a duration literal""" return node.value def visit_IntegerLiteral(self, node: ast.IntegerLiteral) -> None: """Return the value of an integer literal""" return node.value def visit_ArrayLiteral(self, node: ast.ArrayLiteral) -> None: """Return the value of an array literal""" return np.array([self.visit(val) for val in node.values]) def visit_IndexExpression(self, node: ast.IndexExpression) -> None: """Return the value of an index expression. Assumes the IndexExpression is a discrete set (ex. arr[{0, 1, 2}]), range (ex. arr[0:3:1]), or list of expressions (ex. arr[0:2, 4])""" activation_record = self.call_stack.down_stack(node.collection.name) if isinstance(node.index, ast.DiscreteSet): return activation_record[node.collection.name][self.visit(node.index)] if isinstance(node.index, ast.RangeDefinition): start, end, step = self.visit(node.index) return activation_record[node.collection.name][start:end:step] # assume list of expressions indices = [self.visit(index) for index in node.index] return activation_record[node.collection.name][indices] def visit_ReturnStatement(self, node: ast.ReturnStatement) -> None: """Return the value of a return statement""" return self.visit(node.expression) def visit_Concatenation(self, node: ast.Concatenation) -> None: """ Concatenation node visitor: joins elements in OpenQASM concatenation statement example: qasm: 'a ++ b ++ c;' Args: node (ast.Concatenation): openQASM concatenation AST node """ return np.concatenate([self.visit(node.lhs), self.visit(node.rhs)]) def quantum_gate_helper( self, node: ast.QuantumMeasurementStatement | ast.QuantumReset | ast.QuantumGate ) -> None: """ Helper function for QuantumGate, QuantumMeasurementStatement, and QuantumReset. Puts the calibration dictionary onto the stack and then adds a new activation record for the quantum gate, measurement, or reset. In the case of a QuantumGate, the function first adds the arguments to the activation record, then the statements in the measurement, reset, or gate body are visited. 
""" curr_nesting = self.call_stack.peek().nesting_level outer_activation_record = ActivationRecord( name="calibration", ar_type=ARType.CALIBRATION, nesting_level=curr_nesting + 1, ) outer_activation_record.members = self.calibration_scope with self.ar_context_manager(outer_activation_record): inner_activation_record = ActivationRecord( name="defcal", ar_type=ARType.DEFCAL, nesting_level=curr_nesting + 2 ) with self.ar_context_manager(inner_activation_record): signature = Mangler(node).signature() mangled_name = signature.match(self.defcal_names)[0] if isinstance(node, ast.QuantumGate): if node.modifiers: raise self.compile_out(node.modifiers) args = [self.visit(arg) for arg in node.arguments] node = self.defcal_nodes[mangled_name] inner_activation_record = self.call_stack.peek() for arg, val in zip( node.arguments, args ): # ignores Integer arguments if isinstance(arg, ast.ClassicalArgument): inner_activation_record[arg.name.name] = val for statement in self.defcal_nodes[mangled_name].body: if isinstance(statement, ast.ReturnStatement): returnval = self.visit(statement) return returnval self.visit(statement) @_maybe_annotated def visit_QuantumGate(self, node: ast.QuantumGate) -> None: """ QuantumGate node visitor: Visits and evaluates quantum gate call, at this point the gate operation should have a calibration definition (defcal). Example: qasm: defcal x90 $0 {...} >>x90 $0; -> ^^^^^^^ Args: node (ast.QuantumGate): openQASM QuantumGate AST node Optionally returns elements based on gate definition """ self.quantum_gate_helper(node) @_maybe_annotated def visit_QuantumMeasurementStatement( self, node: ast.QuantumMeasurementStatement ) -> None: """ QuantumMeasurementStatement node visitor: Visits and evaluates quantum measurement call, at this point the quantum measurement statement should have a calibration definition (defcal) Example: qasm: defcal measure $0 -> bit {...} >>b1 = measure $0; -> ^^^^^^^^^^^ Args: node (ast.QuantumMeasurementStatement): openQASM QuantumMeasurementStatement AST node Optionally allows for returns based on quantum measurement definition (gate definition) """ match node.target: case ast.Identifier(): name = node.target.name activation_record = self.call_stack.down_stack(name) activation_record[name] = self.quantum_gate_helper(node) case ast.IndexedIdentifier(): activation_record = self.call_stack.down_stack(node.target.name.name) activation_record[node.target.name.name][ [self.visit(index) for index in node.target.indices[0]] ] = self.quantum_gate_helper(node) case _: self.quantum_gate_helper(node) @_maybe_annotated def visit_QuantumReset(self, node: ast.QuantumReset) -> None: """ QuantumReset node visitor: Visits and evaluates quantum reset call, at this point the quantum reset should have a calibration definition (defcal) Example: qasm: defcal reset $0 {...} >>reset $0; -> ^^^^^^^^^ Args: node (ast.QuantumReset): openQASM QuantumReset AST node """ self.quantum_gate_helper(node) def visit_QuantumMeasurement(self, node: ast.QuantumMeasurement) -> None: """ QuantumMeasurement node visitor: Visits and evaluates quantum measurement call, at this point the quantum measurement statement should have a calibration definition (defcal). 
Differs from QuantumMeasurementStatement in that it does not allow for returns Example: qasm: defcal measure $0 -> bit {...} >>measure $0; ^^^^^^^^^^^ Args: node (ast.QuantumMeasurement): openQASM QuantumMeasurement AST node Optionally allows for returns based on quantum measurement definition (gate definition) """ self.quantum_gate_helper(node) def visit_ExternArgument(self, node: ast.ExternArgument) -> None: """Passes extern argument call""" def visit_DiscreteSet(self, node: ast.DiscreteSet) -> None: """Returns a set of discrete values""" discrete_set = [] for i in node.values: discrete_set.append(self.visit(i)) return set(discrete_set) def visit_RangeDefinition(self, node: ast.RangeDefinition) -> None: """Returns tuple of (start,end,step) or default values""" start = self.visit(node.start) if node.start else 0 end = self.visit(node.end) if node.end else None step = self.visit(node.step) if node.step else 1 return (start, end, step) def visit_ExpressionStatement(self, node: ast.ExpressionStatement) -> None: """Visits expression statement""" return self.visit(node.expression) def generic_visit(self, node: ast.QASMNode) -> None: LOGGER.debug("Generic visit: %s", node) @_maybe_annotated def visit_ClassicalDeclaration(self, node: ast.ClassicalDeclaration) -> None: """Saves classical declaration to activation record""" activation_record = self.call_stack.peek() match node: case ast.ClassicalDeclaration(type=ast.PortType()): name = node.identifier.name # activation_record = self.call_stack.peek() activation_record[name] = self.setup.ports[name] case ast.ClassicalDeclaration( type=ast.FrameType(), init_expression=ast.FunctionCall(name=ast.Identifier("newframe")), ): call = node.init_expression assert isinstance(call, ast.FunctionCall) assert len(call.arguments) == 3 port = call.arguments[0].name frequency = self.visit(call.arguments[1]) phase = self.visit(call.arguments[2]) frame = Frame( name=node.identifier.name, port=activation_record[port], frequency=frequency, phase=phase, ) activation_record[frame.name] = frame case ast.ClassicalDeclaration(type=ast.ArrayType()): if node.init_expression is None: shapes = [self.visit(dim) for dim in node.type.dimensions] activation_record[node.identifier.name] = np.zeros(shape=shapes) else: activation_record[node.identifier.name] = self.visit( node.init_expression ) case ast.ClassicalDeclaration(type=ast.BitType()): if node.init_expression is None: size = self.visit(node.type.size) or 1 activation_record[node.identifier.name] = np.zeros(shape=size) else: activation_record[node.identifier.name] = self.visit( node.init_expression ) case ast.ClassicalDeclaration(type=ast.WaveformType()): if node.init_expression is None: activation_record[node.identifier.name] = None else: activation_record[node.identifier.name] = self.visit( node.init_expression ) case _: if node.init_expression is not None: activation_record[node.identifier.name] = self.visit( node.init_expression ) else: activation_record[node.identifier.name] = None @_maybe_annotated def visit_IODeclaration(self, node: ast.IODeclaration) -> None: """IO Declaration should be resolved""" raise self.compile_out(node) @_maybe_annotated def visit_ConstantDeclaration(self, node: ast.ConstantDeclaration) -> None: """Saves constant declaration to activation record""" activation_record = self.call_stack.peek() activation_record[node.identifier.name] = self.visit(node.init_expression) @_maybe_annotated def visit_CalibrationDefinition(self, node: ast.CalibrationDefinition) -> None: """ CalibrationDefinition (defcal) 
node visitor: Saves defcal defintions to self.defcal_nodes dictionary with a mangled name. These mangled names are also saved to a list of defcal names (self.defcal_names) Args: node (ast.CalibrationDefinition): defcal node to visit """ mangled_name = Mangler(node).signature().mangle() self.defcal_names.append(mangled_name) self.defcal_nodes[mangled_name] = node @_maybe_annotated def visit_CalibrationStatement(self, node: ast.CalibrationStatement) -> None: """ CalibrationStatement node visitor: Evaluates each line in a calibration block. Updates the self.calibration_scope dictionary which maintains a dictionary of values/variables in calibration scope. Args: node (ast.CalibrationStatement): openQASM CalibrationStatement AST node """ curr_nesting = self.call_stack.peek().nesting_level outer_activation_record = ActivationRecord( name="outer_calibration", ar_type=ARType.CALIBRATION, nesting_level=curr_nesting + 1, ) outer_activation_record.members = self.calibration_scope with self.ar_context_manager(outer_activation_record): inner_activation_record = ActivationRecord( name="new_calibration", ar_type=ARType.CALIBRATION, nesting_level=curr_nesting + 2, ) with self.ar_context_manager(inner_activation_record): for statement in node.body: self.visit(statement) self.calibration_scope.update(self.call_stack.peek().members) def visit_QuantumArgument(self, node: ast.QuantumArgument) -> None: """Raises error""" self.visit(node.name) @_maybe_annotated def visit_BreakStatement(self, node: ast.BreakStatement) -> None: """Raises error""" raise NotImplementedError @_maybe_annotated def visit_ContinueStatement(self, node: ast.ContinueStatement) -> None: """Raises error""" raise NotImplementedError @_maybe_annotated def visit_EndStatement(self, node: ast.EndStatement) -> None: """Raises error""" raise NotImplementedError @_maybe_annotated def visit_WhileLoop(self, node: ast.WhileLoop) -> None: """ WhileLoop node visitor: Prints out a while loop in SEQC format (which happens to be identical to openQASM format) All the statements in the block of the while loop are visited Example: qasm: while (int i < 10) {...; i=i+1;} -> seqc: while (cvar i < 10) {...; i=i+1;} Args: node (ast.WhileLoop): openQASM WhileLoop AST node context (PrinterState): state of the printer (e.g. indentation) """ if not self.visit_loops: return activation_record = ActivationRecord( name="while loop", ar_type=ARType.LOOP, nesting_level=self.call_stack.nesting_level + 1, ) with self.ar_context_manager(activation_record): # todo break if while_condition is just True (i.e. infiinite loop) while self.visit(node.while_condition): for statement in node.block: self.visit(statement) @_maybe_annotated def visit_ForInLoop(self, node: ast.ForInLoop) -> None: """ ForInLoop node visitor: Evaluates iteration range of for loop and then evaluates the body of the for loop for each iteration. Args: node (ast.ForInLoop): openQASM ForInLoop AST node Raises: Error: ErrorCode.UNHANDLED If the SET iterated over by the ForInLoop is incorrectly defined or not created using a RangeDefinition """ if not self.visit_loops: return name = node.identifier.name activation_record = ActivationRecord( name=f"for_loop_{self.call_stack.nesting_level+1}", ar_type=ARType.LOOP, nesting_level=self.call_stack.nesting_level + 1, ) with self.ar_context_manager(activation_record): start, end, step = self.visit(node.set_declaration) if end is None:
raise Error(
3
2023-11-16 17:37:29+00:00
16k
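A standalone sketch of the range-evaluation pattern described by the record above: visit_RangeDefinition returns a (start, end, step) tuple with defaults start=0 and step=1, leaving end as None when no upper bound is given, which is why the ForInLoop visitor rejects unbounded ranges. The helper below is illustrative only; its name and behaviour are assumptions, not the project's API.

def iterate_range(start=0, end=None, step=1):
    # Mirrors the (start, end, step) tuple produced by visit_RangeDefinition.
    if end is None:
        raise ValueError("for-in loop range must define an end value")
    i = start
    while (step > 0 and i <= end) or (step < 0 and i >= end):
        yield i
        i += step

# openQASM 3 ranges such as [0:2:10] are end-inclusive, so this yields 0..10 in steps of 2.
assert list(iterate_range(0, 10, 2)) == [0, 2, 4, 6, 8, 10]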
quantuminterface/qiclib
src/qiclib/code/qi_dataflow.py
[ { "identifier": "ForRange", "path": "src/qiclib/code/qi_jobs.py", "snippet": "class ForRange(QiContextManager):\n \"\"\"Adds ForRange to program.\n If multiple cells are used inside body, a synchronisation between the cells is done before the ForRange as well as after the end of the body.\n If QiTimeVariable is used as var, loops starting at 0 are unrolled, to skip pulses/waits inside body using var as length.\n Raises exception if start, end and step are not set up properly.\"\"\"\n\n def __init__(\n self,\n var: _QiVariableBase,\n start: Union[_QiVariableBase, int, float],\n end: Union[_QiVariableBase, int, float],\n step: Union[int, float] = 1,\n ):\n from .qi_types import (\n _TypeConstraintReasonQiCommand,\n _IllegalTypeReason,\n _add_equal_constraints,\n )\n\n super().__init__()\n\n if not isinstance(var, _QiVariableBase):\n raise RuntimeError(\n \"Can only use QiVariables as control variable in ForRanges.\"\n )\n\n start_expr = QiExpression._from(start)\n end_expr = QiExpression._from(end)\n step_expr = QiExpression._from(step)\n\n var._type_info.add_illegal_type(QiType.STATE, _IllegalTypeReason.FOR_RANGE)\n start_expr._type_info.add_illegal_type(\n QiType.STATE, _IllegalTypeReason.FOR_RANGE\n )\n end_expr._type_info.add_illegal_type(QiType.STATE, _IllegalTypeReason.FOR_RANGE)\n step_expr._type_info.add_illegal_type(\n QiType.STATE, _IllegalTypeReason.FOR_RANGE\n )\n\n _add_equal_constraints(\n QiType.TIME,\n _TypeConstraintReasonQiCommand(ForRange),\n var,\n start_expr,\n end_expr,\n step_expr,\n )\n _add_equal_constraints(\n QiType.FREQUENCY,\n _TypeConstraintReasonQiCommand(ForRange),\n var,\n start_expr,\n end_expr,\n step_expr,\n )\n _add_equal_constraints(\n QiType.NORMAL,\n _TypeConstraintReasonQiCommand(ForRange),\n var,\n start_expr,\n end_expr,\n step_expr,\n )\n\n if not isinstance(start, _QiVariableBase) and not isinstance(\n end, _QiVariableBase\n ):\n if (start > end and step >= 0) or (start < end and step <= 0):\n raise ValueError(\"Definition of ForRange faulty\")\n\n self.var = var\n self.start = start_expr\n self.end = end_expr\n self.step = step_expr\n\n self.add_associated_variable(var)\n\n if isinstance(start, _QiVariableBase):\n self.add_associated_variable(start)\n\n if start.id == var.id:\n raise RuntimeError(\"Loop variable can not be used as start value\")\n\n if isinstance(end, _QiVariableBase):\n self.add_associated_variable(end)\n\n if end.id == var.id:\n raise RuntimeError(\"Loop variable can not be used as end value\")\n\n def __exit__(self, exception_type, exception_value, traceback):\n super().__exit__(exception_type, exception_value, traceback)\n check_variable = QiVarInForRange(self.var)\n self.accept(check_variable)\n\n def accept(self, visitor, *input):\n return visitor.visit_for_range(self, *input)\n\n @property\n def is_step_positive(self) -> bool:\n return self.step > 0\n\n def _stringify(self) -> str:\n return f\"ForRange({self.var}, {self.start}, {self.end}, {self.step})\"" }, { "identifier": "If", "path": "src/qiclib/code/qi_jobs.py", "snippet": "class If(QiContextManager):\n \"\"\"\n Add conditional logic to the program.\n If multiple cells are used inside the body, a synchronization between the cells takes place before the If.\n\n :param condition: The condition to check\n\n Example\n -------\n\n .. code-block:: python\n\n with QiJob() as job:\n q = QiCells(1)\n x = QiIntVariable(1)\n with If(x > 1):\n ... # won't be executed\n\n The If statement is most commonly used to react to qubit states in real-time:\n\n .. 
code-block:: python\n\n from qiclib import jobs\n\n with QiJob() as job:\n q = QiCells(1)\n state = QiStateVariable()\n jobs.Readout(q[0], state_to=state)\n with If(state = 0):\n ... # Apply some conditional logic based on the qubit state\n \"\"\"\n\n def __init__(self, condition: Optional[QiCondition] = None):\n super().__init__()\n self._else_body: List[QiCommand] = []\n if condition is None:\n raise RuntimeError(\"No QiCondition given\")\n self.condition = condition\n\n for variable in condition.contained_variables:\n self.add_associated_variable(variable)\n\n def add_else_body(self, else_body):\n self._else_body = else_body.copy()\n\n def is_followed_by_else(self) -> bool:\n return len(self._else_body) != 0\n\n def accept(self, visitor, *input):\n return visitor.visit_if(self, *input)\n\n def _stringify(self) -> str:\n return f\"If({self.condition})\"" }, { "identifier": "Parallel", "path": "src/qiclib/code/qi_jobs.py", "snippet": "class Parallel(QiContextManager):\n \"\"\"Pulses defined in body are united in one trigger command.\"\"\"\n\n def __init__(self):\n super().__init__()\n self.entries: List[List[QiCommand]] = []\n\n def __exit__(self, exception_type, exception_value, traceback):\n temp = _QiJobReference._close_context()\n self.body += temp # So visitors also find commands in Parallel blocks.\n self.entries.append(temp)\n\n containing_cells = QiCMContainedCellVisitor()\n for command in temp:\n if not isinstance(\n command,\n (\n cQiPlay,\n cQiPlayReadout,\n cQiPlayFlux,\n cQiRotateFrame,\n cQiRecording,\n cQiWait,\n ),\n ):\n raise TypeError(\"Type not allowed inside Parallel()\", command)\n if (\n isinstance(command, (cQiRecording, cQiPlayReadout))\n and command.uses_state\n ):\n raise RuntimeError(\"Can not save to state variable inside Parallel\")\n\n try:\n if isinstance(command.length, _QiVariableBase):\n self._associated_variable_set.add(command.length)\n except KeyError:\n pass # length was QiCellProperty\n command.accept(containing_cells)\n\n self._relevant_cells.update(containing_cells.contained_cells)\n\n # If previous command is also parallel, combine by adding another parallel entry at previous command\n try:\n cmd = _QiJobReference.commands[-1]\n if isinstance(cmd, Parallel) and len(cmd.entries) < 2:\n cmd.entries.append(temp)\n cmd._associated_variable_set.update(self._associated_variable_set)\n else:\n _QiJobReference._add_command(self)\n except IndexError:\n _QiJobReference._add_command(self)\n\n class CmdTuple:\n def __init__(self, cmd: QiCommand, start: int, end: int, choke: bool = False):\n self.cmd = cmd\n self.start = start\n self.end = end\n self.choke_cmd = choke\n\n class TimeSlot:\n def __init__(self, cmd_tuples: List[Any], start, end):\n self.cmd_tuples: List[Parallel.CmdTuple] = cmd_tuples\n self.start: int = start\n self.end: int = end\n self.duration: float = 0.0\n\n def _clear_wait_commands(self, cmd_tuples: List[CmdTuple]):\n \"\"\"Clears cQiWait commands from cmd_tuples, if any trigger command is also in cmd_tuples\"\"\"\n contains_pulse = False\n\n for cmd_tuple in cmd_tuples:\n if isinstance(cmd_tuple.cmd, _cQiPlay_base):\n contains_pulse = True\n break\n\n return [\n cmd_tuple\n for cmd_tuple in cmd_tuples\n if isinstance(cmd_tuple.cmd, _cQiPlay_base) or contains_pulse is False\n ]\n\n def _clear_choke_commands(self, cmd_tuples: List[CmdTuple]):\n \"\"\"Clears choke commands, if at the same slot another Play or Readout command is present.\"\"\"\n\n contains_play = False\n contains_readout = False\n\n for cmd_tuple in cmd_tuples:\n if 
isinstance(cmd_tuple.cmd, cQiPlay) and cmd_tuple.choke_cmd is False:\n contains_play = True\n elif (\n isinstance(cmd_tuple.cmd, cQiPlayReadout)\n and cmd_tuple.choke_cmd is False\n ):\n contains_readout = True\n\n if contains_play is False and contains_readout is False:\n return cmd_tuples\n\n cleared_tuples = []\n\n for cmd_tuple in cmd_tuples:\n # if play command is present skip choke command for play\n if isinstance(cmd_tuple.cmd, cQiPlay):\n if cmd_tuple.choke_cmd is True and contains_play:\n continue\n\n # if PlayReadout command is present skip choke command for PlayReadout\n elif isinstance(cmd_tuple.cmd, cQiPlayReadout):\n if cmd_tuple.choke_cmd is True and contains_readout:\n continue\n\n cleared_tuples.append(cmd_tuple)\n\n return cleared_tuples\n\n def _create_time_slots(self, annotated_bodies: List[List[CmdTuple]], max_end: int):\n time_slot_list: List[Parallel.TimeSlot] = []\n for start in range(0, max_end):\n time_slot = self.TimeSlot([], start, start)\n\n # find tuples with start time == start\n for cmd_list in annotated_bodies:\n for cmd_tuple in cmd_list:\n if cmd_tuple.start == start:\n time_slot.cmd_tuples.append(cmd_tuple)\n time_slot.end = max(cmd_tuple.end, time_slot.end)\n cmd_list.remove(cmd_tuple)\n break # next cmd_list\n\n # next start value, if nothing was found\n if len(time_slot.cmd_tuples) == 0:\n continue\n\n time_slot.cmd_tuples = self._clear_wait_commands(time_slot.cmd_tuples)\n time_slot.cmd_tuples = self._clear_choke_commands(time_slot.cmd_tuples)\n\n # Add Wait command, if previous end value < start\n try:\n prev_time_slot = time_slot_list[-1]\n if prev_time_slot.end < start:\n length = util.conv_cycles_to_time(start - prev_time_slot.end)\n new_wait = self.CmdTuple(\n cQiWait(list(self._relevant_cells)[0], length),\n start=prev_time_slot.end,\n end=start,\n )\n time_slot_list.append(\n self.TimeSlot([new_wait], prev_time_slot.end, start)\n )\n except IndexError:\n pass\n\n # Adjust previous end time, if previous.end > start\n try:\n prev_time_slot = time_slot_list[-1]\n prev_time_slot.end = min(prev_time_slot.end, start)\n except IndexError:\n pass\n\n time_slot_list.append(time_slot)\n\n # Add final wait, if previous.end != max_end\n try:\n prev_time_slot = time_slot_list[-1]\n if prev_time_slot.end < max_end:\n length = util.conv_cycles_to_time(max_end - prev_time_slot.end)\n new_wait = self.CmdTuple(\n cQiWait(list(self._relevant_cells)[0], length),\n start=prev_time_slot.end,\n end=max_end,\n )\n time_slot_list.append(\n self.TimeSlot([new_wait], prev_time_slot.end, max_end)\n )\n except IndexError:\n pass\n\n # calculate duration of time slot\n for slot in time_slot_list:\n slot.duration = util.conv_cycles_to_time(slot.end - slot.start)\n\n return time_slot_list\n\n def _generate_command_body(self, cell, sequencer):\n \"\"\"Combines the parallel sequences to one command body.\"\"\"\n\n parallel_bodies: List[List[Parallel.CmdTuple]] = []\n\n max_end = 0\n\n # Generate annotated list of commands with start and end cycle\n for cmd_list in self.entries:\n commands: List[Parallel.CmdTuple] = []\n start: int = 0\n end: int = 0\n for cmd in cmd_list:\n var_pulse = False\n\n if cell not in cmd._relevant_cells:\n continue # skip commands for other cells\n\n if isinstance(cmd.length, _QiVariableBase):\n reg = sequencer.get_var_register(cmd.length)\n\n if reg.valid is False or reg.value is None:\n raise RuntimeError(\n \"Variable inside parallel not initialised or invalidated\"\n )\n\n length = reg.value\n\n if isinstance(cmd, (cQiPlay, cQiPlayReadout)):\n 
var_pulse = True\n else:\n length = util.conv_time_to_cycles(cmd.length, \"ceil\")\n\n if length == 0:\n continue # skip commands with length 0\n\n if isinstance(cmd, cQiRecording) or (\n isinstance(cmd, cQiPlayReadout)\n and isinstance(cmd.recording, cQiRecording)\n ):\n end += length + util.conv_time_to_cycles(\n sequencer.recording_delay, \"ceil\"\n )\n else:\n end += length\n\n cmd_duration = self.CmdTuple(cmd, start, end)\n commands.append(cmd_duration)\n\n if var_pulse:\n # Add parallel choke command after current command, if variable length is used\n parallel_choke = [self.CmdTuple(cmd, end, end + 1, choke=True)]\n parallel_bodies.append(parallel_choke)\n\n max_end = max(end + 1, max_end) # +1 to account for choke command\n else:\n max_end = max(end, max_end)\n\n start = end\n\n parallel_bodies.append(commands)\n\n return self._create_time_slots(parallel_bodies, max_end)\n\n def accept(self, visitor, *input):\n return visitor.visit_parallel(self, *input)\n\n def _stringify(self) -> str:\n return \"Parallel\"" }, { "identifier": "QiCell", "path": "src/qiclib/code/qi_jobs.py", "snippet": "class QiCell:\n \"\"\"A QiCell is an abstract representation of the qubit/cell the program is run on.\n Usually, a single :python:`QiCell` is not instantiated, but instead a :class:`QiCells` object.\n For a single :python:`QiCell`, use instead :python:`QiCells(1)`\n\n A :python:`QiCell` must be instantiated inside within a :class:`QiJob` context.\n\n The :python:`QiCell` object can be used to get properties that are defined on :class:`QiSamples <QiSample>`.\n For this, index the :python:`QiCell` object using the name of the property:\n\n .. code-block:: python\n\n q: QiCell = ...\n t1_time = q[\"t1\"]\n\n The actual value for the accessed property (in the example above, the T1 time) is filled in when executing a\n :class:`QiJob` and providing the actual sample.\n\n **Tasks of the QiCell**:\n\n - Saves the pulses needed for program execution.\n - Provides a dictionary functionality to define commonly used durations/properties.\n - Implements a Sequencer object, which contains the assembler program after compilation.\n\n :param cellID: A unique ID\n :raises RuntimeError: When the :python:`QiCell` is instantiated outside a `QiJob`\n \"\"\"\n\n def __init__(self, cellID: int):\n if not isinstance(_QiJobReference, QiJob):\n raise RuntimeError(\"QiCell can't be used outside of QiJob.\")\n\n self.cellID = cellID\n self.manipulation_pulses: List[QiPulse] = []\n self.flux_pulses: List[QiPulse] = []\n self.readout_pulses: List[QiPulse] = []\n self._result_container: Dict[str, QiResult] = {}\n # The order in which recorded values are assigned to which result container\n self._result_recording_order: List[QiResult] = []\n self._unresolved_property: Set[QiCellProperty] = set()\n self._job_ref = _QiJobReference\n self._relevant_vars: Set[_QiVariableBase] = set()\n\n # These attributes are determined by dataflow analyses\n self._initial_manip_freq: float = None\n self._initial_readout_freq: float = None\n self._initial_rec_offset: float = None\n\n self._rec_length: Union[int, float, QiCellProperty] = None\n\n self._properties: Dict[QiCellProperty, Any] = {}\n\n def __getitem__(self, key):\n if _QiJobReference != self._job_ref:\n raise RuntimeError(\n \"Tried getting values for cells registered to other QiJob\"\n )\n\n prop = self._properties.get(key, QiCellProperty(self, key))\n\n if isinstance(prop, QiCellProperty):\n self._unresolved_property.add(key)\n return prop\n\n def __setitem__(self, key, value):\n if 
_QiJobReference != self._job_ref:\n raise RuntimeError(\n \"Tried setting values for cells registered to other QiJob\"\n )\n self._properties[key] = value\n\n def __call__(self, qic):\n return qic.cell[self.qic_cell]\n\n def get_properties(self):\n return self._properties.copy()\n\n def add_pulse(self, pulse: QiPulse):\n if pulse not in self.manipulation_pulses:\n self.manipulation_pulses.append(pulse)\n\n if len(self.manipulation_pulses) > 13:\n raise RuntimeError(\"Too many pulses in use\")\n\n return self.manipulation_pulses.index(pulse) + 1 # index 0 and 15 are reserved\n\n @property\n def initial_manipulation_frequency(self):\n if self._initial_manip_freq is None:\n if len(self.manipulation_pulses) > 0:\n warnings.warn(\n \"Manipulation pulses without frequency given, using 90 MHz.\"\n )\n return 90e6 # Default frequency\n freq = self._initial_manip_freq\n return freq() if isinstance(freq, QiCellProperty) else freq\n\n def add_recording_length(self, length):\n if self._rec_length is None:\n self._rec_length = length\n elif (\n not self._rec_length._equal_syntax(length)\n if isinstance(self._rec_length, QiExpression)\n else self._rec_length != length\n ):\n raise RuntimeError(\n f\"Cell {self.cellID}: Multiple definitions of recording length used.\"\n )\n\n def add_readout_pulse(self, pulse: QiPulse):\n if pulse not in self.readout_pulses:\n self.readout_pulses.append(pulse)\n\n if len(self.readout_pulses) > 13:\n raise RuntimeError(\"Too many pulses in use\")\n\n return self.readout_pulses.index(pulse) + 1 # index 0 and 15 are reserved\n\n @property\n def initial_readout_frequency(self):\n if self._initial_readout_freq is None:\n if len(self.readout_pulses) > 0:\n warnings.warn(\"Readout pulses without frequency given, using 30 MHz.\")\n return 30e6 # Default frequency\n freq = self._initial_readout_freq\n return freq() if isinstance(freq, QiCellProperty) else freq\n\n @property\n def recording_length(self):\n \"\"\"the length of the recording pulse\"\"\"\n if self._rec_length is not None:\n return (\n self._rec_length()\n if isinstance(self._rec_length, QiCellProperty)\n else self._rec_length\n )\n\n return 0\n\n @property\n def initial_recording_offset(self):\n \"\"\"the recording offset in seconds\"\"\"\n if self._initial_rec_offset is not None:\n return (\n self._initial_rec_offset()\n if isinstance(self._initial_rec_offset, QiCellProperty)\n else self._initial_rec_offset\n )\n\n return 0\n\n def get_result_container(self, result: str) -> QiResult:\n if result in self._result_container:\n return self._result_container[result] # was already added\n else:\n box = QiResult(result)\n box._cell = self\n self._result_container[result] = box\n return box\n\n def add_variable(self, var: _QiVariableBase):\n self._relevant_vars.add(var)\n\n def get_number_of_recordings(self):\n return len(self._result_recording_order)\n\n def set_default_readout(self, pulse):\n pass\n\n def reset(self):\n for container in self._result_container.values():\n container.data = []\n\n def data(\n self, name: Optional[str] = None\n ) -> Union[Dict[str, np.ndarray], np.ndarray]:\n \"\"\"\n Returns the data after running an experiment.\n\n When calling this function without a name, i.e., calling :python:`cell.data()`,\n returns a dictionary containing the results as numpy arrays.\n\n When calling this function with a name, i.e., calling :python:`cell.data(\"result_name\")`,\n returns the whole dictionary.\n\n :param name: The name of the data\n :return: A single result, or a dictionary of result names mapped to 
results.\n \"\"\"\n if name is None:\n result_dict = {}\n for key, container in self._result_container.items():\n result_dict.update({key: container.get()})\n return result_dict\n\n else:\n return self._result_container[name].get()\n\n def _resolve_properties(self, len_dict: Dict[QiCellProperty, Any]):\n keys = list(self._unresolved_property)\n\n missing_keys = self._unresolved_property.difference(len_dict.keys())\n if missing_keys:\n raise RuntimeError(\n f\"Cell {self.cellID}: Not all properties for job could be resolved. \"\n f\"Missing properties: {missing_keys}\"\n )\n\n for key in keys:\n self._properties[key] = len_dict[key]\n\n @property\n def has_unresolved_properties(self):\n return len(self._unresolved_property) > 0\n\n def _get_unresolved_properties(self):\n return [\n key\n for key in list(self._unresolved_property)\n if self._properties.get(key) is None\n ]\n\n def __str__(self) -> str:\n return f\"QiCell({self.cellID})\"" }, { "identifier": "QiCommand", "path": "src/qiclib/code/qi_jobs.py", "snippet": "class QiCommand:\n \"\"\"Base class of every Job command.\n Provides _relevant_cells, containing every cell used for the execution of the command.\n Provides _associated_variable_set, containing every variable needed for the execution of the command.\n \"\"\"\n\n def __init__(self) -> None:\n self._associated_variable_set = QiVariableSet()\n self._relevant_cells: Set[QiCell] = set()\n\n @abstractmethod\n def accept(self, visitor, *input):\n raise RuntimeError(\n f\"{self.__class__} doesn't implement `accept`. This is a bug.\"\n )\n\n def is_variable_relevant(self, variable: _QiVariableBase) -> bool:\n return variable in self._associated_variable_set\n\n def add_associated_variable(self, x):\n if isinstance(x, _QiVariableBase):\n self._associated_variable_set.add(x)\n\n def __str__(self) -> str:\n return \"cQiCommand\"\n\n def _stringify(self) -> str:\n raise NotImplementedError(f\"_stringify not implemented for {repr(self)}\")" }, { "identifier": "QiContextManager", "path": "src/qiclib/code/qi_jobs.py", "snippet": "class QiContextManager(QiCommand):\n \"\"\"Base Class for If, Else, ForRange and Parallel.\n Defines functions for storing commands.\"\"\"\n\n def __init__(self) -> None:\n super().__init__()\n self.body: List[QiCommand] = []\n\n def __enter__(self):\n _QiJobReference._open_new_context()\n return self\n\n def __exit__(self, exception_type, exception_value, traceback):\n self.body = _QiJobReference._close_context()\n _QiJobReference._add_command(self)\n\n def accept(self, visitor, *input):\n return visitor.visit_context_manager(self, *input)" }, { "identifier": "QiJob", "path": "src/qiclib/code/qi_jobs.py", "snippet": "class QiJob:\n \"\"\"\n Container holding program, cells and qi_result containers for execution of program.\n Builds the job with its properties\n\n :param skip_nco_sync: if the NCO synchronization at the beginning should be skipped\n :param nco_sync_length: how long to wait after the nco synchronization\n \"\"\"\n\n def __init__(\n self,\n skip_nco_sync=False,\n nco_sync_length=0,\n ):\n self.qi_results: List[QiResult] = []\n self.cells = []\n self.skip_nco_sync = skip_nco_sync\n self.nco_sync_length = nco_sync_length\n\n self._description = _JobDescription()\n\n # Build\n self._performed_analyses = False\n self._build_done = False\n self._arranged_cells: List[Optional[QiCell]] = []\n self._var_reg_map: Dict[_QiVariableBase, Dict[QiCell, int]] = {}\n\n # Run\n self._custom_processing = None\n self._custom_data_handler = None\n\n def __enter__(self):\n 
# pylint: disable=global-statement\n global _QiJobReference\n _QiJobReference = self\n return self\n\n def __exit__(self, exception_type, exception_value, traceback):\n for cmd in self.commands:\n cmd.accept(QiTypeFallbackVisitor())\n\n for cmd in self.commands:\n cmd.accept(QiPostTypecheckVisitor())\n\n _QiVariableBase.reset_str_id()\n\n # pylint: disable=global-statement\n global _QiJobReference\n _QiJobReference = None\n\n def _open_new_context(self):\n self._description.open_new_context()\n\n def _close_context(self):\n return self._description.close_context()\n\n def _add_command(self, command):\n self._description.add_command(command)\n\n @property\n def commands(self):\n \"\"\"returns the commands of the job\"\"\"\n return self._description._commands\n\n def _register_cells(self, cells: List[QiCell]):\n if len(self.cells) > 0:\n raise RuntimeError(\"Can only register one set of cells at a QiJob.\")\n\n self.cells = cells\n\n def _run_analyses(self):\n \"\"\"\n Executes needed (dataflow) analyses.\n These mutate the commands in QiJob by inserting additional instructions, therefore\n they should only run once, in order to avoid duplicate instructions.\n \"\"\"\n from .analysis.qi_insert_mem_parameters import (\n insert_recording_offset_store_commands,\n insert_manipulation_pulse_frequency_store_commands,\n insert_readout_pulse_frequency_store_commands,\n )\n\n if not self._performed_analyses:\n insert_recording_offset_store_commands(self)\n insert_manipulation_pulse_frequency_store_commands(self)\n insert_readout_pulse_frequency_store_commands(self)\n\n self._performed_analyses = True\n\n def _simulate_recordings(self) -> Dict[Any, List[cQiRecording]]:\n \"\"\"\n Simulates the order cQiRecording executions.\n The result of this simulation is used to disentangle the recordings buffer\n and reassociate the individual recording results with their corresponding Recording commands.\n It might return more elements than are recorded during the real execution.\n \"\"\"\n\n # We first check if there are Recording commands at positions which we can not simulate.\n # i.e. 
If-Else, ForRanges with start or end that are neither constant nor other loop variables.\n # If this is the case we cannot simulate the order.\n visitor = QiResultCollector()\n for cmd in self.commands:\n cmd.accept(visitor)\n\n if len(visitor.found_qi_results) == 0:\n return {cell: [] for cell in self.cells}\n elif visitor.recording_in_if:\n raise RuntimeError(\"Recording command within If-Else statement.\")\n\n # Next we simulate all loops and collect the respective Recording commands inside.\n from .qi_simulate import Simulator\n\n simulator = Simulator(self.cells)\n simulator._simulate(self.commands)\n\n return simulator.cell_recordings\n\n def _build_program(\n self, sample: Optional[QiSample] = None, cell_map: Optional[List[int]] = None\n ):\n if sample is not None and cell_map is not None:\n sample = sample._arrange_for_controller()\n sample = [sample[m] if m < len(sample) else None for m in cell_map]\n\n if cell_map is None:\n cell_map = list(range(len(self.cells)))\n\n # TODO Check that this works with None and right order now\n self._resolve_properties(sample)\n\n for cell in self.cells:\n if len(cell._get_unresolved_properties()) > 0:\n raise RuntimeError(\n f\"Unresolved properties {cell._get_unresolved_properties()} at cell {cell}\"\n )\n\n self._run_analyses()\n\n sim_result = self._simulate_recordings()\n for cell in self.cells:\n cell._result_recording_order = list(\n map(\n lambda x: x.result_box,\n filter(lambda x: x.result_box is not None, sim_result[cell]),\n )\n )\n\n prog_builder = QiProgramBuilder(\n self.cells,\n cell_map,\n self._description._commands.copy(),\n self.skip_nco_sync,\n self.nco_sync_length,\n )\n\n self.cell_seq_dict = prog_builder.build_program()\n self._var_reg_map = prog_builder.get_all_variables()\n self._build_done = True\n\n def _get_sequencer_codes(self):\n return [\n [\n instr.get_riscv_instruction()\n for instr in self.cell_seq_dict[cell].instruction_list\n ]\n for cell in self.cells\n ]\n\n def create_experiment(\n self,\n controller,\n sample: Optional[QiSample] = None,\n averages: int = 1,\n cell_map: Optional[List[int]] = None,\n data_collection=None,\n use_taskrunner=False,\n ):\n from ..experiment.qicode.base import QiCodeExperiment\n\n exp = QiCodeExperiment(\n *self._prepare_experiment_params(\n controller, sample, averages, cell_map, data_collection, use_taskrunner\n )\n )\n\n if data_collection is None:\n if self._custom_processing is not None:\n exp._taskrunner.update(self._custom_processing)\n if self._custom_data_handler is not None:\n exp._data_handler_factory = DataHandler.get_custom_wrapper_factory(\n self._custom_data_handler\n )\n\n # Provide a human-readable description of the execution\n if cell_map is None:\n cell_map = list(range(len(self.cells)))\n str_map = \", \".join([f\"q[{i}] -> sample[{m}]\" for i, m in enumerate(cell_map)])\n exp._job_representation = f\"{self}\\n\\nmapped as {str_map} to\\n\\n{sample}\"\n\n return exp\n\n def _prepare_experiment_params(\n self,\n controller,\n sample: Optional[QiSample] = None,\n averages: int = 1,\n cell_map: Optional[List[int]] = None,\n data_collection=None,\n use_taskrunner=False,\n ):\n if len(self.cells) > len(controller.cell):\n raise IndexError(\n f\"This job requires {len(self.cells)} cells but only \"\n f\"{len(controller.cell)} are available in the QiController.\"\n )\n\n if data_collection is None:\n if self._custom_processing is None:\n data_collection = \"average\"\n else:\n data_collection = \"custom\"\n\n # If float, convert averages to int\n averages = 
int(averages)\n\n if sample is None:\n sample = QiSample(len(controller.cell))\n elif len(sample) < len(self.cells):\n raise ValueError(\n \"Need to submit a QiSample with at least as many cells as the job \"\n f\"has ({len(self.cells)}), but only {len(sample)} provided.\"\n )\n\n if cell_map is None:\n # Use the first cells of the sample\n cell_map = list(range(len(self.cells)))\n else:\n if len(cell_map) != len(self.cells):\n raise ValueError(\n \"cell_map needs to have as many entries as the job has cells, but \"\n f\"{len(cell_map)} entries given and {len(self.cells)} required!\"\n )\n if len(set(cell_map)) != len(cell_map):\n raise ValueError(\"Duplicate values not allowed in cell_map!\")\n if any(m < 0 or m >= len(sample) for m in cell_map):\n raise IndexError(\n \"cell_map values can only point to valid indices within the passed\"\n f\" QiSample object, i.e. values between 0 and {len(sample) - 1}.\"\n )\n\n # Translate cell_map from sample cells (\"cells\") to QiController cells\n cell_map = [sample.cell_map[c] for c in cell_map]\n\n if any(c < 0 or c >= len(controller.cell) for c in cell_map):\n raise ValueError(\n \"The QiSample cell_map can only reference available QiController \"\n f\"cells, i.e. between 0 and {len(controller.cell) - 1}.\"\n )\n\n self._build_program(sample, cell_map)\n\n for_range_list = []\n\n for cell in self.cells:\n for_range_list.append(self.cell_seq_dict[cell]._for_range_list)\n\n return (\n controller,\n self.cells,\n self._get_sequencer_codes(),\n averages,\n for_range_list,\n cell_map,\n self._var_reg_map,\n data_collection,\n use_taskrunner,\n )\n\n def run(\n self,\n controller,\n sample: Optional[QiSample] = None,\n averages: int = 1,\n cell_map: Optional[List[int]] = None,\n data_collection=None,\n use_taskrunner=False,\n ):\n \"\"\"executes the job and returns the results\n\n :param controller: the QiController on which the job should be executed\n :param sample: the QiSample object used for execution of pulses and extracts parameters for the experiment\n :param averages: the number of executions that should be averaged, by default 1\n :param cell_map: A list containing the indices of the cells\n :param data_collection: the data_collection mode for the result, by default \"average\"\n :param use_taskrunner: if the execution should be handled by the Taskrunner\n Some advanced schemes and data_collection modes are currently only supported\n by the Taskrunner and not yet by a native control flow.\n \"\"\"\n exp = self.create_experiment(\n controller, sample, averages, cell_map, data_collection, use_taskrunner\n )\n exp.run()\n\n def run_with_data_callback(self, on_new_data: Callable[[dict], None]):\n pass\n\n def run_streamed(self):\n pass\n\n def set_custom_data_processing(\n self,\n file: str,\n params: Optional[List] = None,\n converter: Optional[Callable[[List], List]] = None,\n mode: Union[TaskRunner.DataMode, str] = TaskRunner.DataMode.INT32,\n data_handler: Optional[Callable[[List[QiCell], DataProvider], None]] = None,\n ):\n from qiclib.experiment.qicode.base import _TaskrunnerSettings\n\n if isinstance(mode, str):\n mode = TaskRunner.DataMode[mode.upper()]\n\n self._custom_processing = _TaskrunnerSettings(\n file, \"QiCode[Custom]\", params, mode, converter\n )\n self._custom_data_handler = data_handler\n\n def print_assembler(\n self,\n cells: Optional[QiCells] = None,\n cell_index=0,\n cell_map: Optional[List[int]] = None,\n ):\n \"\"\"\n Prints the commands as assembler code\n\n :param cells: the QiCells object for execution of pulses and 
saving result\n :param cell_index: the index of the cell in QiCells\n \"\"\"\n print(f\"Print program for cell index {cell_index}\")\n self._build_program(cells, cell_map)\n\n cell = self.cells[cell_index]\n\n self.cell_seq_dict[cell].print_assembler()\n\n def _resolve_properties(self, sample: QiSample):\n # Check if any job cell has unresolved properties -> if not, return\n if not any(cell.has_unresolved_properties for cell in self.cells):\n return\n\n if sample is None:\n raise ValueError(\"QiSample needs to be passed to resolve job properties!\")\n\n for i, cell in enumerate(self.cells):\n if cell.has_unresolved_properties:\n if i < len(sample) and sample[i] is not None:\n cell._resolve_properties(sample[i]._properties)\n else:\n raise ValueError(\n f\"Cell {i} of the job has unresolved properties but no QiSample \"\n \"cell is specified for it! Check your cell_map.\"\n )\n\n def __str__(self) -> str:\n from .qi_visitor import QiStringifyJob\n\n stringify_job = QiStringifyJob()\n return stringify_job.stringify(self)" } ]
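The context snippets above document ForRange, If, Parallel, QiCell and QiJob. The fragment below is a hypothetical usage sketch assembled purely from those docstrings (the import path and the QiCells/QiIntVariable names are assumptions), showing how a terminating ForRange and an If condition are expressed inside a QiJob.

from qiclib.code.qi_jobs import QiJob, QiCells, QiIntVariable, ForRange, If  # import path assumed

with QiJob() as job:
    q = QiCells(1)                   # one abstract cell/qubit
    i = QiIntVariable(0)
    with ForRange(i, 0, 10, 2):      # start/end/step must describe a loop that terminates,
        ...                          # otherwise ForRange raises "Definition of ForRange faulty"
    with If(i > 4):                  # conditions are built from QiVariables, as in the If docstring
        ...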
from abc import abstractmethod
from enum import Enum
from typing import Optional, List, Set, Tuple, Union, Dict
from copy import copy

from qiclib.code.qi_var_definitions import (
    _QiVariableBase,
    QiExpression,
)
from .qi_jobs import (
    ForRange,
    If,
    Parallel,
    QiCell,
    QiCommand,
    QiContextManager,
    QiJob,
)
11,223
): """Implementation of (a fairly naive) worklist algorithm which performs the dataflow analysis, with the given visitor.""" queue = list(cfg.nodes) cfg.add_value(name, initial) while len(queue) != 0: next = queue.pop(0) preds = list(predecessors(next)) if len(preds) != 0: input = preds[0].node.value_map[name] for pred in preds[1:]: input = input.merge(pred.node.value_map[name]) else: input = initial if next.type == _CFGNode.Type.COMMAND: output = next.command.accept(visitor, input, next) else: output = input original = next.value_map[name] if output != original: next.value_map[name] = output for succ in successors(next): queue.append(succ.node) class FlatLatticeValue(DataflowValue): """ FlatLatticeValue is a commonly used abstract value. * undefined: represents no value. * value: represents a single value. * no_const: represents all values. One should not use this constructor directly but instead use the :meth:`undefined` :meth:`no_const` and :meth:`value` class functions instead. """ class Type(Enum): UNDEFINED = 0 VALUE = 1 NO_CONST = 2 def __init__(self, type, value: QiExpression): self.type = type self.value = value @staticmethod def undefined(): return FlatLatticeValue(FlatLatticeValue.Type.UNDEFINED, None) @staticmethod def no_const(): return FlatLatticeValue(FlatLatticeValue.Type.NO_CONST, None) @staticmethod def value(value): return FlatLatticeValue(FlatLatticeValue.Type.VALUE, value) def merge(self, other): assert isinstance(other, FlatLatticeValue) if self.type == other.type and self.type == FlatLatticeValue.Type.VALUE: if self.value._equal_syntax(other.value): return self else: return FlatLatticeValue.no_const() else: if self.type == FlatLatticeValue.Type.UNDEFINED: return other elif other.type == FlatLatticeValue.Type.UNDEFINED: return self elif self.type == FlatLatticeValue.Type.NO_CONST: return self elif other.type == FlatLatticeValue.Type.NO_CONST: return other else: raise NotImplementedError(f"Merge of {other.type} not implemented") def __eq__(self, other): return self.type == other.type and ( self.value._equal_syntax(other.value) if self.type == FlatLatticeValue.Type.VALUE else True ) def __str__(self): if self.type == FlatLatticeValue.Type.VALUE: return f"{self.value}" else: return str(self.type) def __repr__(self): if self.type == FlatLatticeValue.Type.UNDEFINED: return "<undefined>" elif self.type == FlatLatticeValue.Type.VALUE: return f"<value: {self.value}>" elif self.type == FlatLatticeValue.Type.NO_CONST: return "<no_const>" else: raise NotImplementedError(f"__repr__ for type {self.type} not implemented") class CellValues(DataflowValue): """ DataflowValue which generalises FlatLatticeValue so every cell has its own FlatLatticeValue. """ def __init__(self, values=None): self.values = copy(values or {}) @classmethod
# Copyright © 2017-2023 Quantum Interface ([email protected]) # Richard Gebauer, IPE, Karlsruhe Institute of Technology # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. """ This module provides basic infrastructure to perform dataflow analyses on qicode programs. Dataflow analyses are computed on the control flow graph (CFG) of a QiJob which should be created when necessary. The dataflow analysis itself is performed in using a standard worklist algorithm. The abstract domain is modeled using DataflowValue. Its merge function represents the supremum calculation. It is recommended to treat DataflowValues as immutable. """ class _CFGNode: class Type(Enum): START = 0 END = 1 COMMAND = 2 class SrcEdgeType(Enum): """CFG Edge information about the source node""" IF_TRUE = 0 IF_FALSE = 1 FOR_BODY = 2 FOR_END = 4 NORMAL = 5 def __str__(self): return { _CFGNode.SrcEdgeType.IF_TRUE: "if_true", _CFGNode.SrcEdgeType.IF_FALSE: "if_false", _CFGNode.SrcEdgeType.FOR_BODY: "for_true", _CFGNode.SrcEdgeType.FOR_END: "for_end", _CFGNode.SrcEdgeType.NORMAL: "normal", }[self] class DestEdgeType(Enum): """CFG Edge information about the destination node""" FOR_BODY_RETURN = 0 FOR_ENTRY = 1 NORMAL = 2 def __str__(self): return { _CFGNode.DestEdgeType.FOR_BODY_RETURN: "for_body_ret", _CFGNode.DestEdgeType.FOR_ENTRY: "for_entry", _CFGNode.DestEdgeType.NORMAL: "normal", }[self] class Neighbor: """Combination of node and both edge types. Each edge in the CFG is represented by an instance of this class""" def __init__( self, neighbor: "_CFGNode", src_edge_type: "_CFGNode.SrcEdgeType", dest_edge_type: Optional["_CFGNode.DestEdgeType"] = None, ): # Default argument didn't work for me in this case. if dest_edge_type is None: dest_edge_type = _CFGNode.DestEdgeType.NORMAL self.node = neighbor # Information about the edge for the src node # (for example, if this edge goes to the 'else' block of an 'if' statement.) self.src_edge_type = src_edge_type # Information about the edge for the destination node # (for example, if the edge loops back from the body of a for statement.) self.dest_edge_type = dest_edge_type _cfg_node_next_id = 1 def __init__( self, type: Union["_CFGNode.Type", QiCommand], instruction_list, index, *predecessors: "Tuple[_CFGNode, _CFGNode.SrcEdgeType]", ): if isinstance(type, QiCommand): self.type = _CFGNode.Type.COMMAND self.command = type else: assert isinstance(type, _CFGNode.Type) self.type = type # This field is used to associated arbitrary data with every node. # For example, a dataflow analysis might use this dictionary to # the nodes current abstract value. self.value_map: Dict[str, CellValues] = {} self.predecessors: Set[_CFGNode.Neighbor] = set() self.successors: Set[_CFGNode.Neighbor] = set() # Used to find commands in job command list, so we can insert new instruction before or after this # command. 
self.instruction_list = instruction_list self.instruction_index = index self.id = _CFGNode._cfg_node_next_id _CFGNode._cfg_node_next_id += 1 self.connect_predecessors(*predecessors) def connect_successors(self, *successors: "_CFGNode.Neighbor"): assert all(map(lambda x: isinstance(x, _CFGNode.Neighbor), successors)) for succ_neighbor in successors: succ = succ_neighbor.node pred_neighbor = copy(succ_neighbor) pred_neighbor.node = self self.successors.add(succ_neighbor) succ.predecessors.add(pred_neighbor) def connect_predecessors(self, *predecessors: "_CFGNode.Neighbor"): assert all(map(lambda x: isinstance(x, _CFGNode.Neighbor), predecessors)) for pred_neighbor in predecessors: pred = pred_neighbor.node succ_neighbor = copy(pred_neighbor) succ_neighbor.node = self self.predecessors.add(pred_neighbor) pred.successors.add(succ_neighbor) class _CFG: """Constructs a control flow graph (CFG) from the commands of a QiJob. The end node does not contain a command, if the last top level command is an If-else or ForRange """ def __init__(self, job: QiJob): self.nodes: Set[_CFGNode] = set() start, end = recursive_build_sub_cfg(job.commands, self.nodes) self.end = _CFGNode(_CFGNode.Type.END, None, None, *end) self.start = _CFGNode(_CFGNode.Type.START, None, None) self.start.connect_successors( _CFGNode.Neighbor(start, _CFGNode.SrcEdgeType.NORMAL) ) def node_iterator(self): visited = set() stack = [self.start] while len(stack) > 0: node = stack.pop() visited.add(node) yield node for successor in node.successors: successor = successor.node if successor not in visited: stack.append(successor) def add_value(self, key, initial): for node in self.node_iterator(): if key not in node.value_map: node.value_map[key] = initial def dump_dot_graph(self, path): """Dump the current cfg topology as a dot file for inspecting and debugging purposes.""" with open(path, "w", encoding="utf-8") as f: f.write("\ndigraph {\n") queue = [self.start] node_visited_or_in_queue = set() node_visited_or_in_queue.add(self.start) while len(queue) > 0: node = queue.pop(0) node_attributes = "\n".join( [f"{name} = {value}" for name, value in node.value_map.items()] ) if node.type == _CFGNode.Type.COMMAND: if isinstance(node.command, QiCommand): node_text = f"{node.command._stringify()}" else: node_text = f"{node.command}" label = f"{node_text}\n{node_attributes}" shape = "box" elif node.type == _CFGNode.Type.START: label = f"start\n{node_attributes}" shape = "oval" elif node.type == _CFGNode.Type.END: label = f"end\n{node_attributes}" shape = "oval" escaped_label = label.translate(str.maketrans({'"': '\\"'})) f.write(f'\t{node.id} [shape={shape}, label="{escaped_label}"];\n') for successor in node.successors: src_edge_type = successor.src_edge_type dest_edge_type = successor.dest_edge_type successor = successor.node assert isinstance(successor, _CFGNode) label = [] if src_edge_type is not _CFGNode.SrcEdgeType.NORMAL: label.append(f"{src_edge_type}") if dest_edge_type is not _CFGNode.DestEdgeType.NORMAL: label.append(f"{dest_edge_type}") label = ", ".join(label) node_label = f'[label="{label}"]' f.write(f"\t{node.id} -> {successor.id} {node_label};\n") if successor not in node_visited_or_in_queue: queue.append(successor) node_visited_or_in_queue.add(successor) f.write("}") def recursive_build_sub_cfg( commands: List[QiCommand], nodes ) -> Tuple[_CFGNode, List[_CFGNode.Neighbor]]: """ Constructs the nodes and edges for a CFG containing provided commands. `nodes` accumulates all nodes of the CFG. 
""" assert len(commands) > 0 prev: List[_CFGNode.Neighbor] = [] for idx, command in enumerate(commands, 0): if isinstance(command, If): node = _CFGNode(command, commands, idx, *prev) nodes.add(node) if len(command.body) > 0: body_start, body_end = recursive_build_sub_cfg(command.body, nodes) node.connect_successors( _CFGNode.Neighbor(body_start, _CFGNode.SrcEdgeType.IF_TRUE) ) prev = body_end else: prev = [_CFGNode.Neighbor(node, _CFGNode.SrcEdgeType.IF_TRUE)] if command.is_followed_by_else(): # len(command._else_body) > 0 else_start, else_end = recursive_build_sub_cfg( command._else_body, nodes ) node.connect_successors( _CFGNode.Neighbor(else_start, _CFGNode.SrcEdgeType.IF_FALSE) ) prev += else_end else: prev.append(_CFGNode.Neighbor(node, _CFGNode.SrcEdgeType.IF_FALSE)) elif isinstance(command, ForRange): for p in prev: p.dest_edge_type = _CFGNode.DestEdgeType.FOR_ENTRY node = _CFGNode(command, commands, idx, *prev) nodes.add(node) if len(command.body) > 0: body_start, body_end = recursive_build_sub_cfg(command.body, nodes) dest_edge_type = ( _CFGNode.DestEdgeType.FOR_ENTRY if isinstance(body_start.command, ForRange) else None ) node.connect_successors( _CFGNode.Neighbor( body_start, _CFGNode.SrcEdgeType.FOR_BODY, dest_edge_type ) ) for b in body_end: b.dest_edge_type = _CFGNode.DestEdgeType.FOR_BODY_RETURN node.connect_predecessors(*body_end) else: node.connect_predecessors( _CFGNode.Neighbor( node, _CFGNode.SrcEdgeType.FOR_BODY, _CFGNode.DestEdgeType.FOR_BODY_RETURN, ) ) prev = [_CFGNode.Neighbor(node, _CFGNode.SrcEdgeType.FOR_END)] elif isinstance(command, Parallel): # Parallel Blocks have somewhat tricky semantics and don't fit neatly into a CFG schema. # Therefore we just treat them as a single command and the respective analyses can deal with them # as they see fit. node = _CFGNode(command, commands, idx, *prev) nodes.add(node) prev = [_CFGNode.Neighbor(node, _CFGNode.SrcEdgeType.NORMAL)] else: assert not isinstance( command, QiContextManager ), "Context manager should probably be handled separately." node = _CFGNode(command, commands, idx, *prev) nodes.add(node) prev = [_CFGNode.Neighbor(node, _CFGNode.SrcEdgeType.NORMAL)] if idx == 0: start = node end = prev return start, end class DataflowValue: """ Interface for the abstract value used by dataflow analyses An implementation of DataflowValue should be a bounded lattice. """ @abstractmethod def merge(self, other: "DataflowValue") -> "DataflowValue": raise NotImplementedError( f"{self.__class__} doesn't implement merge function. This is a bug." ) class DataflowVisitor: """Visitor for dataflow analyses. The input (of type DataflowValue) is in the input field. 
The resulting output is returned by the respective visitor methods.""" def visit_cell_command(self, cell_cmd, input, node): return input def visit_context_manager(self, context_manager, input, node): return input def visit_if(self, if_cm, input, node): return input def visit_parallel(self, parallel_cm, input, node): return input def visit_for_range(self, for_range_cm, input, node): return input def visit_variable_command(self, variable_cmd, input, node): return input def visit_assign_command(self, assign_cmd, input, node): return input def visit_declare_command(self, declare_cmd, input, node): return input def visit_sync_command(self, sync_cmd, input, node): return input def visit_asm_command(self, asm_command, input, node): return input def forward_dataflow( cfg: _CFG, name, visitor: "DataflowVisitor", initial: DataflowValue, ): dataflow( cfg, name, visitor, initial, lambda x: x.predecessors, lambda x: x.successors, ) def reverse_dataflow( cfg: _CFG, name, visitor: "DataflowVisitor", initial: DataflowValue, ): dataflow( cfg, name, visitor, initial, lambda x: x.successors, lambda x: x.predecessors, ) def dataflow( cfg: _CFG, name, visitor: "DataflowVisitor", initial: DataflowValue, predecessors, successors, ): """Implementation of (a fairly naive) worklist algorithm which performs the dataflow analysis, with the given visitor.""" queue = list(cfg.nodes) cfg.add_value(name, initial) while len(queue) != 0: next = queue.pop(0) preds = list(predecessors(next)) if len(preds) != 0: input = preds[0].node.value_map[name] for pred in preds[1:]: input = input.merge(pred.node.value_map[name]) else: input = initial if next.type == _CFGNode.Type.COMMAND: output = next.command.accept(visitor, input, next) else: output = input original = next.value_map[name] if output != original: next.value_map[name] = output for succ in successors(next): queue.append(succ.node) class FlatLatticeValue(DataflowValue): """ FlatLatticeValue is a commonly used abstract value. * undefined: represents no value. * value: represents a single value. * no_const: represents all values. One should not use this constructor directly but instead use the :meth:`undefined` :meth:`no_const` and :meth:`value` class functions instead. 
""" class Type(Enum): UNDEFINED = 0 VALUE = 1 NO_CONST = 2 def __init__(self, type, value: QiExpression): self.type = type self.value = value @staticmethod def undefined(): return FlatLatticeValue(FlatLatticeValue.Type.UNDEFINED, None) @staticmethod def no_const(): return FlatLatticeValue(FlatLatticeValue.Type.NO_CONST, None) @staticmethod def value(value): return FlatLatticeValue(FlatLatticeValue.Type.VALUE, value) def merge(self, other): assert isinstance(other, FlatLatticeValue) if self.type == other.type and self.type == FlatLatticeValue.Type.VALUE: if self.value._equal_syntax(other.value): return self else: return FlatLatticeValue.no_const() else: if self.type == FlatLatticeValue.Type.UNDEFINED: return other elif other.type == FlatLatticeValue.Type.UNDEFINED: return self elif self.type == FlatLatticeValue.Type.NO_CONST: return self elif other.type == FlatLatticeValue.Type.NO_CONST: return other else: raise NotImplementedError(f"Merge of {other.type} not implemented") def __eq__(self, other): return self.type == other.type and ( self.value._equal_syntax(other.value) if self.type == FlatLatticeValue.Type.VALUE else True ) def __str__(self): if self.type == FlatLatticeValue.Type.VALUE: return f"{self.value}" else: return str(self.type) def __repr__(self): if self.type == FlatLatticeValue.Type.UNDEFINED: return "<undefined>" elif self.type == FlatLatticeValue.Type.VALUE: return f"<value: {self.value}>" elif self.type == FlatLatticeValue.Type.NO_CONST: return "<no_const>" else: raise NotImplementedError(f"__repr__ for type {self.type} not implemented") class CellValues(DataflowValue): """ DataflowValue which generalises FlatLatticeValue so every cell has its own FlatLatticeValue. """ def __init__(self, values=None): self.values = copy(values or {}) @classmethod
def default(cls, cells: List[QiCell], value: FlatLatticeValue):
3
2023-11-10 10:26:10+00:00
16k
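FlatLatticeValue in the record above forms the classic flat (constant-propagation) lattice: undefined is the neutral element of merge, no_const absorbs everything, and two differing concrete values collapse to no_const. A minimal standalone illustration of that merge rule, using stand-in names rather than the qiclib classes:

from enum import Enum

class Flat(Enum):
    UNDEFINED = 0   # "no value seen yet" -- neutral element of merge
    NO_CONST = 1    # "could be anything" -- absorbing element of merge

def merge(a, b):
    # a and b are Flat.UNDEFINED, Flat.NO_CONST, or a concrete constant.
    if a is Flat.UNDEFINED:
        return b
    if b is Flat.UNDEFINED:
        return a
    if a is Flat.NO_CONST or b is Flat.NO_CONST:
        return Flat.NO_CONST
    return a if a == b else Flat.NO_CONST

assert merge(Flat.UNDEFINED, 5) == 5            # undefined merges away
assert merge(5, 5) == 5                         # agreeing constants stay constant
assert merge(5, 7) is Flat.NO_CONST             # disagreeing constants lose precision
assert merge(Flat.NO_CONST, 5) is Flat.NO_CONST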
jpcadena/fastapi-boilerplate
app/api/api_v1/router/auth.py
[ { "identifier": "get_redis_dep", "path": "app/api/deps.py", "snippet": "async def get_redis_dep(\n redis_dependency: Annotated[RedisDependency, Depends()]\n) -> AsyncGenerator[Redis, None]: # type: ignore\n \"\"\"\n Lazy generation of Redis dependency\n :param redis_dependency: The dependency injection on Redis\n :type redis_dependency: RedisDependency\n :return: The Redis connection instance as a generator\n :rtype: AsyncGenerator[Redis, None]\n \"\"\"\n async with redis_dependency as redis:\n yield redis" }, { "identifier": "get_current_user", "path": "app/api/oauth2_validation.py", "snippet": "async def get_current_user(\n token: Annotated[str, Depends(oauth2_scheme)],\n auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)],\n user_service: Annotated[UserService, Depends(get_user_service)],\n redis: Annotated[Redis, Depends(get_redis_dep)], # type: ignore\n) -> UserAuth:\n \"\"\"\n Fetches the current authenticated user based on the provided JWT\n access token\n :param token: The Access token from OAuth2PasswordBearer\n :type token: str\n :param auth_settings: Dependency method for cached setting object\n :type auth_settings: AuthSettings\n :param user_service: Dependency method for User service object\n :type user_service: UserService\n :param redis: Dependency method for async Redis connection\n :type redis: Redis\n :return: Authenticated user information\n :rtype: UserAuth\n \"\"\"\n token_service: TokenService = TokenService(redis, auth_settings)\n is_blacklisted: bool = await token_service.is_token_blacklisted(token)\n if is_blacklisted:\n raise HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED,\n detail=\"Token is blacklisted\",\n )\n return await authenticate_user(token, auth_settings, user_service, redis)" }, { "identifier": "get_refresh_current_user", "path": "app/api/oauth2_validation.py", "snippet": "async def get_refresh_current_user(\n refresh_token: Annotated[str, Depends(refresh_token_scheme)],\n auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)],\n user_service: Annotated[UserService, Depends(get_user_service)],\n redis: Annotated[Redis, Depends(get_redis_dep)], # type: ignore\n) -> UserAuth:\n \"\"\"\n Fetches the current authenticated user based on the provided JWT\n refresh token\n :param refresh_token: The Refresh token from OAuth2PasswordBearer\n :type refresh_token: str\n :param auth_settings: Dependency method for cached setting object\n :type auth_settings: AuthSettings\n :param user_service: Dependency method for User service object\n :type user_service: UserService\n :param redis: Dependency method for async Redis connection\n :type redis: Redis\n :return: Authenticated user information\n :rtype: UserAuth\n \"\"\"\n return await authenticate_user(\n refresh_token, auth_settings, user_service, redis\n )" }, { "identifier": "get_auth_settings", "path": "app/config/config.py", "snippet": "def get_init_settings() -> InitSettings:\ndef get_settings() -> Settings:\ndef get_sql_settings() -> SQLDatabaseSettings:\ndef get_auth_settings() -> AuthSettings:" }, { "identifier": "AuthSettings", "path": "app/config/db/auth_settings.py", "snippet": "class AuthSettings(BaseSettings):\n \"\"\"\n Settings class for authentication using JWT and Redis\n \"\"\"\n\n model_config = SettingsConfigDict(\n env_file=\".env\",\n env_file_encoding=\"utf-8\",\n case_sensitive=True,\n extra=\"allow\",\n )\n\n MAX_REQUESTS: PositiveInt = 30\n RATE_LIMIT_DURATION: PositiveInt = 60\n BLACKLIST_EXPIRATION_SECONDS: PositiveInt = 3600\n API_V1_STR: str = 
\"/api/v1\"\n ALGORITHM: str = \"HS256\"\n AUTH_URL: str = \"api/v1/auth/\"\n TOKEN_URL: str = \"api/v1/auth/login\"\n OAUTH2_SCHEME: str = \"JWT\"\n OAUTH2_TOKEN_DESCRIPTION: str = (\n \"JWT token used to authenticate most of\" \" the API endpoints.\"\n )\n OAUTH2_REFRESH_TOKEN_DESCRIPTION: str = (\n \"JWT token used to authenticate\" \" most ofhe API endpoints.\"\n )\n TOKEN_USER_INFO_REGEX: str = (\n r\"^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-\"\n r\"[0-9a-f]{4}-[0-9a-f]{12}:\\d{1,3}\\.\"\n r\"\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}$\"\n )\n SUB_REGEX: str = (\n r\"^username:[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-\"\n r\"[89ab][0-9a-f]{3}-[0-9a-f]{12}$\"\n )\n HEADERS: dict[str, str] = {\"WWW-Authenticate\": \"Bearer\"}\n DETAIL: str = \"Could not validate credentials\"\n NO_CLIENT_FOUND: str = \"No client found on the request\"\n SECRET_KEY: str\n SERVER_URL: AnyHttpUrl\n SERVER_DESCRIPTION: str\n CACHE_SECONDS: PositiveInt = 3600\n ACCESS_TOKEN_EXPIRE_MINUTES: float\n REFRESH_TOKEN_EXPIRE_MINUTES: PositiveInt\n EMAIL_RESET_TOKEN_EXPIRE_HOURS: PositiveInt\n AUDIENCE: Optional[AnyHttpUrl] = None\n STRICT_TRANSPORT_SECURITY_MAX_AGE: PositiveInt\n\n @field_validator(\"AUDIENCE\", mode=\"before\")\n def assemble_audience(\n cls, v: Optional[str], info: ValidationInfo\n ) -> AnyHttpUrl:\n \"\"\"\n Combine server host and API_V1_STR to create the audience\n string.\n :param v: The value of audience attribute\n :type v: Optional[str]\n :param info: The field validation info\n :type info: ValidationInfo\n :return: The AUDIENCE attribute\n :rtype: AnyHttpUrl\n \"\"\"\n # pylint: disable=unused-argument,no-self-argument,invalid-name\n if info.config is None:\n raise ValueError(\"info.config cannot be None\")\n return AnyHttpUrl(\n f'{str(info.data.get(\"SERVER_URL\"))[:-1]}:8000/'\n f'{info.data.get(\"TOKEN_URL\")}'\n )\n\n REDIS_SCHEME: str\n REDIS_HOST: str\n REDIS_USERNAME: str\n REDIS_PASSWORD: str\n REDIS_PORT: PositiveInt\n REDIS_DATABASE_URI: Optional[RedisDsn] = None\n\n @field_validator(\"REDIS_DATABASE_URI\", mode=\"before\")\n def assemble_redis_connection(\n cls, v: Optional[str], info: ValidationInfo\n ) -> RedisDsn:\n \"\"\"\n Assemble the cache database connection as URI string\n :param v: Variables to consider\n :type v: str\n :param info: The field validation info\n :type info: ValidationInfo\n :return: Redis URI\n :rtype: RedisDsn\n \"\"\"\n # pylint: disable=no-self-argument,invalid-name\n if info.config is None:\n raise ValueError(\"info.config cannot be None\")\n return RedisDsn(\n str(\n Url.build(\n scheme=info.data.get(\"REDIS_SCHEME\", \"\"),\n username=info.data.get(\"REDIS_USERNAME\"),\n password=info.data.get(\"REDIS_PASSWORD\"),\n host=info.data.get(\"REDIS_HOST\", \"\"),\n port=info.data.get(\"REDIS_PORT\"),\n )\n )\n )" }, { "identifier": "InitSettings", "path": "app/config/init_settings.py", "snippet": "class InitSettings(BaseSettings):\n \"\"\"\n Init Settings class based on Pydantic Base Settings\n \"\"\"\n\n model_config = SettingsConfigDict(\n case_sensitive=True,\n extra=\"allow\",\n )\n\n ITERATIONS: PositiveInt = 100000\n KEY_BYTES_LENGTH: PositiveInt = 32\n SALT_BYTES: PositiveInt = 16\n IV_BYTES: PositiveInt = 12\n PUBLIC_EXPONENT: PositiveInt = 65537\n RSA_KEY_BITS: PositiveInt = 2048\n SALUTE: str = \"Salute!\"\n ROOT_MSG: str = \"Hello, World!\"\n SERVER_NAME: str = \"FastAPI Boilerplate\"\n PROJECT_NAME: str = \"fastapi-boilerplate\"\n VERSION: str = \"1.0\"\n ENCODING: str = \"UTF-8\"\n DEFAULT_REGION: str = \"Guayas\"\n DEFAULT_COUNTRY: str = \"Ecuador\"\n 
OPENAPI_FILE_PATH: str = \"/openapi.json\"\n DATE_FORMAT: str = \"%Y-%m-%d\"\n DATETIME_FORMAT: str = \"%Y-%m-%d %H:%M:%S\"\n FILE_DATE_FORMAT: str = \"%d-%b-%Y-%H-%M-%S\"\n IMAGES_APP: str = \"images\"\n IMAGES_PATH: str = \"/assets/images\"\n IMAGES_DIRECTORY: str = \"assets/images\"\n EMAIL_TEMPLATES_DIR: str = \"templates\"\n PASSWORD_RECOVERY_SUBJECT: str = \"Password recovery for user\"\n NEW_ACCOUNT_SUBJECT: str = \"New account for user\"\n WELCOME_SUBJECT: str = \"Welcome to \"\n PASSWORD_CHANGED_CONFIRMATION_SUBJECT: str = (\n \"Successfully password \" \"changed for \"\n )\n DELETE_ACCOUNT_SUBJECT: str = \"Account deleted for \"\n LOG_FORMAT: str = (\n \"[%(name)s][%(asctime)s][%(levelname)s][%(module)s]\"\n \"[%(funcName)s][%(lineno)d]: %(message)s\"\n )\n PASSWORD_REGEX: str = (\n \"^(?=.*?[A-Z])(?=.*?[a-z])(?=.*?[0-9])(?=.*?\" \"[#?!@$%^&*-]).{8,14}$\"\n )\n\n SUMMARY: str = \"\"\"This backend project is FastAPI template.\n This project serves as the backend, which aims to provide a robust and\n reliable system to its users.\n This backend application plays a crucial role in providing the\n functionality for user authentication, real-time monitoring,\n data processing, and advanced alerting system. It is designed to ensure\n the scalability and maintainability of the mobile app,\n making it a vital part of the overall solution.\n \"\"\"\n DESCRIPTION: str = f\"\"\"**FastAPI**, **SQLAlchemy** and **Redis** helps you\n do awesome stuff. 🚀\n \\n\\n<img src=\"data:image/png;base64,{img_b64}\"/>\"\"\"\n LICENSE_INFO: dict[str, str] = {\n \"name\": \"MIT\",\n \"identifier\": \"MIT\",\n }\n TAGS_METADATA: list[dict[str, str]] = [\n {\n \"name\": \"user\",\n \"description\": f\"\"\"Operations with users, such as register, get,\n update and delete.\\n\\n<img src=\"data:image/png;base64,\n {users_b64}\" width=\"150\" height=\"100\"/>\"\"\",\n },\n {\n \"name\": \"auth\",\n \"description\": f\"\"\"The authentication logic is here as well as\n password recovery and reset.\n \\n\\n<img src=\"data:image/png;base64,{auth_b64}\" width=\"75\"\n height=\"75\"/>\"\"\",\n },\n ]\n USER_CREATE_EXAMPLES: dict[str, Example] = {\n \"normal\": {\n \"summary\": \"A normal example\",\n \"description\": \"A **normal** user create object that works \"\n \"correctly.\",\n \"value\": {\n \"username\": \"username\",\n \"email\": \"[email protected]\",\n \"first_name\": \"Some\",\n \"middle_name\": \"One\",\n \"last_name\": \"Example\",\n \"password\": \"Hk7pH9*35Fu&3U\",\n \"gender\": Gender.MALE,\n \"birthdate\": date(2004, 12, 31),\n \"phone_number\": PhoneNumber(\"+593987654321\"),\n \"address\": {\n \"street_address\": \"Urdesa Norte mz A1 v 99\",\n \"locality\": \"Guayaquil\",\n \"region\": \"Guayas\",\n \"country\": \"Ecuador\",\n \"postal_code\": \"090505\",\n },\n },\n },\n \"converted\": {\n \"summary\": \"An example with converted data\",\n \"description\": \"FastAPI can convert phone number `strings` to \"\n \"actual `numbers` automatically\",\n \"value\": {\n \"username\": \"username\",\n \"email\": \"[email protected]\",\n \"first_name\": \"Some\",\n \"middle_name\": \"One\",\n \"last_name\": \"Example\",\n \"password\": \"Hk7pH9*35Fu&3U\",\n \"gender\": Gender.MALE,\n \"birthdate\": date(2004, 12, 31),\n \"phone_number\": PhoneNumber(593987654321),\n \"address\": {\n \"street_address\": \"Urdesa Norte mz A1 v 99\",\n \"locality\": \"Guayaquil\",\n \"region\": \"Guayas\",\n \"country\": \"Ecuador\",\n \"postal_code\": \"090505\",\n },\n },\n },\n \"invalid\": {\n \"summary\": \"Invalid data is 
rejected with an error\",\n \"value\": {\n \"username\": \"username\",\n \"email\": \"username\",\n \"first_name\": \"Some\",\n \"middle_name\": \"One\",\n \"last_name\": \"Example\",\n \"password\": \"miclave123\",\n \"gender\": Gender.MALE,\n \"birthdate\": date(95, 12, 31),\n \"phone_number\": PhoneNumber(\"5939876a4321\"),\n \"address\": {\n \"street_address\": \"True\",\n \"locality\": \"123\",\n \"region\": \"Andes\",\n \"country\": \"New York\",\n \"postal_code\": \"999999\",\n },\n },\n },\n }\n USER_UPDATE_EXAMPLES: dict[str, Example] = {\n \"normal\": {\n \"summary\": \"A normal example\",\n \"description\": \"A **normal** user update object that works \"\n \"correctly.\",\n \"value\": {\n \"username\": \"username\",\n \"email\": \"[email protected]\",\n \"first_name\": \"Some\",\n \"middle_name\": \"One\",\n \"last_name\": \"Example\",\n \"password\": \"Hk7pH9*35Fu&3U\",\n \"gender\": Gender.MALE,\n \"birthdate\": date(2004, 12, 31),\n \"phone_number\": PhoneNumber(593987654321),\n \"address\": {\n \"street_address\": \"Urdesa Norte mz A1 v 99\",\n \"locality\": \"Guayaquil\",\n },\n },\n },\n \"converted\": {\n \"summary\": \"An example with converted data\",\n \"description\": \"FastAPI can convert phone numbers `strings` to \"\n \"actual `numbers` automatically\",\n \"value\": {\n \"username\": \"username\",\n \"email\": \"[email protected]\",\n \"first_name\": \"Some\",\n \"middle_name\": \"One\",\n \"last_name\": \"Example\",\n \"password\": \"Hk7pH9*35Fu&3U\",\n \"gender\": Gender.MALE,\n \"birthdate\": date(2004, 12, 31),\n \"phone_number\": PhoneNumber(\"593987654321\"),\n \"address\": {\n \"street_address\": \"Urdesa Norte mz A1 v 99\",\n \"locality\": \"Guayaquil\",\n },\n },\n },\n \"invalid\": {\n \"summary\": \"Invalid data is rejected with an error\",\n \"value\": {\n \"username\": \"username\",\n \"email\": \"username\",\n \"first_name\": \"Some\",\n \"middle_name\": \"One\",\n \"last_name\": \"Example\",\n \"password\": \"miclave123\",\n \"gender\": Gender.MALE,\n \"birthdate\": date(95, 12, 31),\n \"phone_number\": PhoneNumber(\"59398x54321\"),\n \"address\": {\n \"street_address\": \"True\",\n \"locality\": \"123\",\n },\n },\n },\n }\n EMAIL_BODY_EXAMPLES: dict[str, Example] = {\n \"normal\": {\n \"summary\": \"A normal example\",\n \"description\": \"A **normal** email object that works correctly.\",\n \"value\": \"[email protected]\",\n },\n \"invalid\": {\n \"summary\": \"Invalid data is rejected with an error\",\n \"value\": 123,\n },\n }\n TOKEN_PAYLOAD_EXAMPLES: dict[str, Example] = {\n \"normal\": {\n \"summary\": \"A normal example\",\n \"description\": \"A **normal** token payload object that works \"\n \"correctly.\",\n \"value\": {\n \"token\": \"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9\",\n \"password\": \"Hk7pH9*35Fu&3U\",\n },\n },\n \"invalid\": {\n \"summary\": \"Invalid data is rejected with an error\",\n \"value\": {\n \"token\": \"123\",\n \"password\": \"abc123\",\n },\n },\n }\n AUTHORIZATION_HEADER_EXAMPLES: dict[str, Example] = {\n \"normal\": {\n \"summary\": \"A normal example\",\n \"description\": \"A **normal** authorization token object that works \"\n \"correctly.\",\n \"value\": jwt.encode(\n claims=jsonable_encoder(\n {\n \"sub\": f\"username:{str(uuid4())}\",\n \"nationalities\": [\"ECU\"],\n \"email\": \"[email protected]\",\n \"nickname\": \"example\",\n \"preferred_username\": \"example\",\n \"given_name\": \"Some\",\n \"family_name\": \"Example\",\n \"middle_name\": \"One\",\n \"gender\": Gender.MALE,\n \"birthdate\": date(2004, 
12, 31),\n \"updated_at\": datetime.now(),\n \"phone_number\": PhoneNumber(\"+593987654321\"),\n \"address\": {\n \"street_address\": \"Urdesa Norte mz A1 v 99\",\n \"locality\": \"Guayaquil\",\n \"region\": \"Guayas\",\n \"country\": \"Ecuador\",\n \"postal_code\": \"090505\",\n },\n \"exp\": int(time.time()) + 1800,\n \"nbf\": int(time.time()) - 1,\n \"iat\": int(time.time()),\n }\n ),\n key=\"f52e826e62cdd364c86f129cb18db2fe2be93859c5104cac9585f\"\n \"305378dce65\",\n algorithm=\"HS256\",\n ),\n },\n \"invalid\": {\n \"summary\": \"Invalid data is rejected with an error\",\n \"value\": \"123\",\n },\n }\n LIMIT_EXAMPLES: dict[str, Example] = {\n \"normal\": {\n \"summary\": \"A normal example\",\n \"description\": \"A **normal** limit query parameter that works \"\n \"correctly.\",\n \"value\": 1,\n },\n \"converted\": {\n \"summary\": \"An example with converted data\",\n \"description\": \"FastAPI can convert limit `strings` to actual\"\n \" `numbers` automatically\",\n \"value\": \"5\",\n },\n \"invalid\": {\n \"summary\": \"Invalid data is rejected with an error\",\n \"value\": -1,\n },\n }\n SKIP_EXAMPLES: dict[str, Example] = {\n \"normal\": {\n \"summary\": \"A normal example\",\n \"description\": \"A **normal** skip query parameter that works \"\n \"correctly.\",\n \"value\": 0,\n },\n \"converted\": {\n \"summary\": \"An example with converted data\",\n \"description\": \"FastAPI can convert skip `strings` to actual\"\n \" `numbers` automatically\",\n \"value\": \"20\",\n },\n \"invalid\": {\n \"summary\": \"Invalid data is rejected with an error\",\n \"value\": -1,\n },\n }" }, { "identifier": "Settings", "path": "app/config/settings.py", "snippet": "class Settings(BaseSettings):\n \"\"\"\n Settings class based on Pydantic Base Settings\n \"\"\"\n\n model_config = SettingsConfigDict(\n env_file=\".env\",\n env_file_encoding=\"utf-8\",\n case_sensitive=True,\n extra=\"allow\",\n )\n\n SMTP_PORT: PositiveInt\n SMTP_HOST: str\n SMTP_USER: str\n SMTP_PASSWORD: str\n MAIL_SUBJECT: str\n MAIL_TIMEOUT: float\n EMAILS_FROM_EMAIL: Optional[EmailStr] = None\n EMAILS_FROM_NAME: Optional[str] = None\n SUPERUSER_EMAIL: EmailStr\n SUPERUSER_FIRST_NAME: str\n SUPERUSER_PASSWORD: str\n SUPERUSER_LAST_NAME: str\n SUPERUSER_STREET_ADDRESS: str\n SUPERUSER_LOCALITY: str\n SUPERUSER_POSTAL_CODE: str\n BACKEND_CORS_ORIGINS: list[AnyHttpUrl] = []\n\n PUBLIC_KEY_PATH: FilePath\n PRIVATE_KEY_PATH: FilePath\n\n @field_validator(\n \"PUBLIC_KEY_PATH\", \"PRIVATE_KEY_PATH\", mode=\"before\", check_fields=True\n )\n def validate_key_paths(cls, key_path: FilePath) -> FilePath:\n \"\"\"\n Validate the provided key path.\n :param key_path: Provided key path\n :type key_path: FilePath\n :return: The validated key path\n :rtype: FilePath\n \"\"\"\n if not str(key_path).endswith(\".pem\"):\n raise ValueError(f\"{key_path} must have a .pem extension\")\n base_name: str = os.path.basename(key_path)\n if not base_name.endswith(\"key.pem\"):\n raise ValueError(\n f\"{key_path} must have a file name ending with 'key'\"\n )\n return key_path\n\n @field_validator(\"BACKEND_CORS_ORIGINS\", mode=\"before\")\n def assemble_cors_origins(\n cls, v: Union[str, list[str]]\n ) -> Union[list[str], str]:\n \"\"\"\n Assemble a list of allowed CORS origins.\n :param v: Provided CORS origins, either a string or a list of\n strings\n :type v: Union[str, list[str]]\n :return: List of Backend CORS origins to be accepted\n :rtype: Union[list[str], str]\n \"\"\"\n # pylint: disable=unused-argument,no-self-argument,invalid-name\n if 
isinstance(v, str) and not v.startswith(\"[\"):\n return [i.strip() for i in v.split(\",\")]\n if isinstance(v, list):\n return v\n raise ValueError(v)\n\n CONTACT_NAME: Optional[str] = None\n CONTACT_URL: Optional[AnyHttpUrl] = None\n CONTACT_EMAIL: Optional[EmailStr] = None\n CONTACT: Optional[dict[str, Any]] = None\n\n @field_validator(\"CONTACT\", mode=\"before\")\n def assemble_contact(\n cls, v: Optional[str], info: ValidationInfo\n ) -> dict[str, str]:\n \"\"\"\n Assemble contact information\n :param v: Variables to consider\n :type v: str\n :param info: The field validation info\n :type info: ValidationInfo\n :return: The contact attribute\n :rtype: dict[str, str]\n \"\"\"\n # pylint: disable=unused-argument,no-self-argument,invalid-name\n if info.config is None:\n raise ValueError(\"info.config cannot be None\")\n contact: dict[str, Any] = {}\n if info.data.get(\"CONTACT_NAME\"):\n contact[\"name\"] = info.data.get(\"CONTACT_NAME\")\n if info.data.get(\"CONTACT_URL\"):\n contact[\"url\"] = info.data.get(\"CONTACT_URL\")\n if info.data.get(\"CONTACT_EMAIL\"):\n contact[\"email\"] = info.data.get(\"CONTACT_EMAIL\")\n return contact" }, { "identifier": "verify_password", "path": "app/core/security/password.py", "snippet": "def verify_password(hashed_password: str, plain_password: str) -> bool:\n \"\"\"\n Verifies if a plain text password matches a hashed password\n :param plain_password: The plain text password to verify\n :type plain_password: str\n :param hashed_password: The hashed password to compare against\n :type hashed_password: str\n :return: True if the passwords match, False otherwise\n :rtype: bool\n \"\"\"\n if not plain_password:\n raise_custom_error(\"Plain password cannot be empty or None\")\n if not hashed_password:\n raise_custom_error(\"Hashed password cannot be empty or None\")\n return crypt_context.verify(plain_password, hashed_password)" }, { "identifier": "NotFoundException", "path": "app/exceptions/exceptions.py", "snippet": "class NotFoundException(Exception):\n \"\"\"\n Not Found Exception class\n \"\"\"\n\n def __init__(self, message: str, note: Optional[str] = None):\n super().__init__(message)\n if note:\n self.add_note(note)" }, { "identifier": "ServiceException", "path": "app/exceptions/exceptions.py", "snippet": "class ServiceException(Exception):\n \"\"\"\n Service Layer Exception class\n \"\"\"\n\n def __init__(self, message: str, note: Optional[str] = None):\n super().__init__(message)\n if note:\n self.add_note(note)" }, { "identifier": "User", "path": "app/models/sql/user.py", "snippet": "class User(Base): # type: ignore\n \"\"\"\n User model class representing the \"users\" table\n \"\"\"\n\n __tablename__ = \"users\"\n\n id: Mapped[UUID4] = mapped_column(\n UUID(as_uuid=True),\n index=True,\n nullable=False,\n primary_key=True,\n unique=True,\n server_default=text(\"(gen_random_uuid())\"),\n comment=\"ID of the User\",\n )\n username: Mapped[str] = mapped_column(\n String(15),\n index=True,\n unique=True,\n nullable=False,\n comment=\"Username to identify the user\",\n )\n email: Mapped[EmailStr] = mapped_column(\n String(320),\n index=True,\n unique=True,\n nullable=False,\n comment=\"Preferred e-mail address of the User\",\n )\n first_name: Mapped[str] = mapped_column(\n String(50), nullable=False, comment=\"First name(s) of the User\"\n )\n middle_name: Mapped[str] = mapped_column(\n String(50), nullable=True, comment=\"Middle name(s) of the User\"\n )\n last_name: Mapped[str] = mapped_column(\n String(100), nullable=False, comment=\"Last 
name(s) of the User\"\n )\n password: Mapped[str] = mapped_column(\n String(60), nullable=False, comment=\"Hashed password of the User\"\n )\n gender: Mapped[Gender] = mapped_column(\n Enum(Gender), nullable=True, comment=\"Gender of the User\"\n )\n birthdate: Mapped[PastDate] = mapped_column(\n Date, nullable=True, comment=\"Birthday of the User\"\n )\n phone_number: Mapped[PhoneNumber] = mapped_column(\n String(20),\n nullable=True,\n comment=\"Preferred telephone number of the User\",\n )\n is_active: Mapped[bool] = mapped_column(\n Boolean(),\n default=True,\n nullable=False,\n server_default=text(\"true\"),\n comment=\"True if the user is active; otherwise false\",\n )\n is_superuser: Mapped[bool] = mapped_column(\n Boolean(),\n default=False,\n nullable=False,\n server_default=text(\"false\"),\n comment=\"True if the user is super user; otherwise false\",\n )\n created_at: Mapped[datetime] = mapped_column(\n TIMESTAMP(\n timezone=True, precision=sql_database_setting.TIMESTAMP_PRECISION\n ),\n default=datetime.now(),\n nullable=False,\n server_default=text(\"now()\"),\n comment=\"Time the User was created\",\n )\n updated_at: Mapped[datetime] = mapped_column(\n TIMESTAMP(\n timezone=True, precision=sql_database_setting.TIMESTAMP_PRECISION\n ),\n nullable=True,\n onupdate=text(\"now()\"),\n comment=\"Time the User was updated\",\n )\n address_id: Mapped[UUID4] = mapped_column(\n UUID(as_uuid=True),\n ForeignKey(\n \"users_address.id\",\n name=\"users_address_id_fkey\",\n ),\n nullable=False,\n comment=\"ID of the User's address\",\n )\n address: Mapped[\"Address\"] = relationship( # type: ignore\n \"Address\", back_populates=\"users\", lazy=\"joined\"\n )\n\n __table_args__ = (\n CheckConstraint(\n \"char_length(username) >= 4\", name=\"users_username_length\"\n ),\n CheckConstraint(\"char_length(email) >= 3\", name=\"users_email_length\"),\n CheckConstraint(\n sql_database_setting.DB_EMAIL_CONSTRAINT, name=\"users_email_format\"\n ),\n CheckConstraint(\n \"char_length(first_name) >= 1\", name=\"users_first_name_length\"\n ),\n CheckConstraint(\n \"char_length(last_name) >= 1\", name=\"users_last_name_length\"\n ),\n CheckConstraint(\"LENGTH(password) = 60\", name=\"users_password_length\"),\n CheckConstraint(\n sql_database_setting.DB_PHONE_NUMBER_CONSTRAINT,\n name=\"users_phone_number_format\",\n ),\n )" }, { "identifier": "Msg", "path": "app/schemas/external/msg.py", "snippet": "class Msg(BaseModel):\n \"\"\"\n Schema for representing a message.\n \"\"\"\n\n model_config = ConfigDict(\n json_schema_extra={\"example\": {\"msg\": \"Hello, World!!!\"}}\n )\n\n msg: str = Field(..., title=\"Message\", description=\"Message to display\")" }, { "identifier": "TokenResetPassword", "path": "app/schemas/external/token.py", "snippet": "class TokenResetPassword(BaseModel):\n \"\"\"\n Token Reset Password for Request based on Pydantic Base Model.\n \"\"\"\n\n model_config = ConfigDict(\n json_schema_extra={\n \"example\": {\n \"token\": \"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9\",\n \"password\": \"Hk7pH9*35Fu&3U\",\n }\n }\n )\n\n token: str = Field(\n ..., title=\"Token\", description=\"Access token\", min_length=30\n )\n password: str = Field(\n ...,\n title=\"New password\",\n description=\"New password to reset\",\n validate_default=True,\n min_length=8,\n max_length=14,\n )\n\n @field_validator(\"password\", mode=\"before\")\n def validate_password(cls, v: Optional[str]) -> str:\n \"\"\"\n Validates the password attribute\n :param v: The password to be validated\n :type v: Optional[str]\n 
:return: The validated password\n :rtype: str\n \"\"\"\n # pylint: disable=no-self-argument\n return validate_password(v)" }, { "identifier": "TokenResponse", "path": "app/schemas/external/token.py", "snippet": "class TokenResponse(Token):\n \"\"\"\n Token for Response based on Pydantic Base Model.\n \"\"\"\n\n model_config = ConfigDict(\n json_schema_extra=token_response_example,\n )\n\n token_type: str = Field(\n default=\"bearer\", title=\"Token type\", description=\"Type of the token\"\n )" }, { "identifier": "UserResponse", "path": "app/schemas/external/user.py", "snippet": "class UserResponse(UserID, UserBase, UserOptional, UserInDB):\n \"\"\"\n Schema for the response when retrieving a User.\n \"\"\"\n\n model_config = ConfigDict(\n from_attributes=True,\n json_schema_extra=user_response_example,\n )" }, { "identifier": "UserUpdate", "path": "app/schemas/external/user.py", "snippet": "class UserUpdate(BaseModel):\n \"\"\"\n Schema for updating a User record.\n \"\"\"\n\n model_config = ConfigDict(\n from_attributes=True,\n json_schema_extra=user_update_example,\n )\n\n username: Optional[str] = Field(\n default=None,\n title=\"Username\",\n description=\"Username to identify the user\",\n min_length=4,\n max_length=15,\n )\n email: Optional[EmailStr] = Field(\n default=None,\n title=\"Email\",\n description=\"Preferred e-mail address of the User\",\n )\n first_name: Optional[str] = Field(\n default=None,\n title=\"First name\",\n description=\"First name(s) of the User\",\n min_length=1,\n max_length=50,\n )\n middle_name: Optional[str] = Field(\n default=None,\n title=\"Middle Name\",\n description=\"Middle name(s) of the User\",\n max_length=50,\n )\n last_name: Optional[str] = Field(\n default=None,\n title=\"Last name\",\n description=\"Last name(s) of the User\",\n min_length=1,\n max_length=100,\n )\n password: Optional[str] = Field(\n default=None,\n title=\"New Password\",\n description=\"New Password of the User\",\n min_length=8,\n max_length=14,\n )\n gender: Optional[Gender] = Field(\n default=None, title=\"Gender\", description=\"Gender of the User\"\n )\n birthdate: Optional[date] = Field(\n default=None, title=\"Birthdate\", description=\"Birthday of the User\"\n )\n phone_number: Optional[PhoneNumber] = Field(\n default=None,\n title=\"Phone number\",\n description=\"Preferred telephone number of the User\",\n )\n address: Optional[Address] = Field(\n default=None, title=\"Address\", description=\"Address of the User\"\n )\n\n @field_validator(\"password\", mode=\"before\")\n def validate_password(cls, v: Optional[str]) -> str:\n \"\"\"\n Validates the password attribute\n :param v: The password value to validate\n :type v: Optional[str]\n :return: The validated password\n :rtype: str\n \"\"\"\n # pylint: disable=no-self-argument\n return validate_password(v)" }, { "identifier": "UserUpdateResponse", "path": "app/schemas/external/user.py", "snippet": "class UserUpdateResponse(\n UserAuth, UserName, UserPassword, UserOptional, UserInDB\n):\n \"\"\"\n Schema for the response when updating a User.\n \"\"\"\n\n model_config = ConfigDict(\n from_attributes=True,\n json_schema_extra=user_update_response_example,\n )" }, { "identifier": "UserAuth", "path": "app/schemas/infrastructure/user.py", "snippet": "class UserAuth(UserID, UserBaseAuth):\n \"\"\"\n User Auth that inherits from UserID.\n \"\"\"\n\n model_config = ConfigDict(\n from_attributes=True,\n json_schema_extra=user_auth_example,\n )" }, { "identifier": "common_auth_procedure", "path": 
"app/services/infrastructure/auth.py", "snippet": "async def common_auth_procedure(\n user: User,\n client_ip: str,\n redis: Redis, # type: ignore\n auth_settings: AuthSettings,\n) -> TokenResponse:\n \"\"\"\n Common authentication procedure for login and refresh token based on\n token generation\n :param user: The user to authenticate\n :type user: User\n :param client_ip: The IP address of the client\n :type client_ip: str\n :param redis: Dependency method for async Redis connection\n :type redis: Redis\n :param auth_settings: Dependency method for cached setting object\n :type auth_settings: AuthSettings\n :return: The token response object\n :rtype: TokenResponse\n \"\"\"\n auth_token = AuthService.auth_token(user, auth_settings)\n user_info = f\"{str(user.id)}:{client_ip}\"\n token = TokenDB(key=auth_token.refresh_token, user_info=user_info)\n token_service = TokenService(redis, auth_settings)\n token_set = await token_service.create_token(token)\n if not token_set:\n detail = \"Could not insert data in Authentication database\"\n logger.warning(detail)\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST, detail=detail\n )\n return TokenResponse(**auth_token.model_dump())" }, { "identifier": "TokenService", "path": "app/services/infrastructure/token.py", "snippet": "class TokenService:\n \"\"\"\n Service class for token operations in the authentication database\n \"\"\"\n\n def __init__(\n self,\n redis: Redis, # type: ignore\n auth_settings: AuthSettings,\n ):\n self._redis: Redis = redis # type: ignore\n self._refresh_token_expire_minutes: (\n PositiveInt\n ) = auth_settings.REFRESH_TOKEN_EXPIRE_MINUTES\n self._blacklist_expiration_seconds: PositiveInt = (\n PositiveInt(\n PositiveInt(auth_settings.ACCESS_TOKEN_EXPIRE_MINUTES) + 1\n )\n * 60\n ) # converting minutes to seconds\n\n @handle_redis_exceptions\n @benchmark\n async def create_token(self, token: Token) -> bool:\n \"\"\"\n Create a token in authentication database\n :param token: Token object with key and value\n :type token: Token\n :return: True if the token was inserted; otherwise false\n :rtype: bool\n \"\"\"\n try:\n inserted: bool = await self._redis.setex(\n token.key,\n self._refresh_token_expire_minutes,\n token.user_info,\n )\n except RedisError as r_exc:\n logger.error(\"Error at creating token. %s\", r_exc)\n raise r_exc\n return inserted\n\n @handle_redis_exceptions\n @benchmark\n async def get_token(self, key: str) -> Optional[str]:\n \"\"\"\n Read token from the authentication database\n :param key: The key to search for\n :type key: str\n :return: The refresh token\n :rtype: str\n \"\"\"\n try:\n value: str = str(await self._redis.get(key))\n except RedisError as r_exc:\n logger.error(\"Error at getting token. %s\", r_exc)\n raise r_exc\n return value\n\n @handle_redis_exceptions\n @benchmark\n async def blacklist_token(self, token_key: str) -> bool:\n \"\"\"\n Blacklist a given token.\n :param token_key: The token key to blacklist.\n :type token_key: str\n :return: True if the token was successfully blacklisted,\n otherwise False.\n :rtype: bool\n \"\"\"\n try:\n blacklisted: bool = await self._redis.setex(\n f\"blacklist:{token_key}\",\n self._blacklist_expiration_seconds,\n \"true\",\n )\n except RedisError as r_exc:\n logger.error(\"Error at blacklisting token. 
%s\", r_exc)\n raise r_exc\n return blacklisted\n\n @handle_redis_exceptions\n @benchmark\n async def is_token_blacklisted(self, token_key: str) -> bool:\n \"\"\"\n Check if a given token is blacklisted.\n :param token_key: The token key to verify.\n :type token_key: str\n :return: True if the token is blacklisted, otherwise False.\n :rtype: bool\n \"\"\"\n try:\n blacklisted: Optional[str] = await self._redis.get(\n f\"blacklist\" f\":{token_key}\"\n )\n except RedisError as r_exc:\n logger.error(\"Error at checking if token is blacklisted. %s\", r_exc)\n raise r_exc\n return bool(blacklisted)" }, { "identifier": "UserService", "path": "app/services/infrastructure/user.py", "snippet": "class UserService:\n \"\"\"\n Service class for user-related business logic.\n \"\"\"\n\n def __init__(\n self,\n user_repo: UserRepository,\n redis: Redis, # type: ignore\n ):\n self._user_repo: UserRepository = user_repo\n self._redis: Redis = redis # type: ignore\n self._cache_seconds: PositiveInt = auth_setting.CACHE_SECONDS\n\n async def get_user_by_id(self, user_id: UUID4) -> Optional[UserResponse]:\n \"\"\"\n Retrieve user information by its unique identifier\n :param user_id: The unique identifier of the user\n :type user_id: UUID4\n :return: User information\n :rtype: Optional[UserResponse]\n \"\"\"\n user: Optional[User]\n try:\n user = await self._user_repo.read_by_id(IdSpecification(user_id))\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n raise ServiceException(str(db_exc)) from db_exc\n if not user:\n detail: str = f\"User with id {user_id} not found in the system.\"\n logger.error(detail)\n raise NotFoundException(detail)\n user_response: Optional[\n UserResponse\n ] = await model_to_response( # type: ignore\n user, UserResponse\n )\n return user_response\n\n async def get_login_user(self, username: str) -> User:\n \"\"\"\n Retrieve user information for login purposes by its username\n :param username: The username to retrieve User from\n :type username: str\n :return: User information\n :rtype: User\n \"\"\"\n try:\n user: Optional[User] = await self._user_repo.read_by_username(\n UsernameSpecification(username)\n )\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n raise ServiceException(str(db_exc)) from db_exc\n if not user:\n raise ServiceException(f\"User not found with username: {username}\")\n return user\n\n async def get_user_by_username(\n self, username: str\n ) -> Optional[UserResponse]:\n \"\"\"\n Retrieve user information by its username\n :param username: The username to retrieve User from\n :type username: str\n :return: User information\n :rtype: UserResponse\n \"\"\"\n try:\n user: User = await self.get_login_user(username)\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n raise ServiceException(str(db_exc)) from db_exc\n return await model_to_response(user, UserResponse) # type: ignore\n\n async def get_user_by_email(\n self, email: EmailStr\n ) -> Optional[UserResponse]:\n \"\"\"\n Retrieve user information by its unique email.\n :param email: The email to retrieve User from\n :type email: EmailStr\n :return: User found in database\n :rtype: Optional[UserResponse]\n \"\"\"\n try:\n user: Optional[User] = await self._user_repo.read_by_email(\n EmailSpecification(email)\n )\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n raise ServiceException(str(db_exc)) from db_exc\n if not user:\n raise ServiceException(f\"User not found with email: {email}\")\n return await model_to_response(user, UserResponse) 
# type: ignore\n\n async def get_user_id_by_email(self, email: EmailStr) -> UUID4:\n \"\"\"\n Read the user ID from the database with unique email.\n :param email: Email to retrieve User from\n :type email: EmailStr\n :return: User ID found in database\n :rtype: UUID4\n \"\"\"\n try:\n user_id: Optional[UUID4] = await self._user_repo.read_id_by_email(\n EmailSpecification(email)\n )\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n raise ServiceException(str(db_exc)) from db_exc\n if not user_id:\n raise ServiceException(f\"User ID not found with email: {email}\")\n return user_id\n\n async def register_user(\n self, user: Union[UserCreate, UserSuperCreate]\n ) -> Optional[UserCreateResponse]:\n \"\"\"\n Register a new user in the database\n :param user: Request object representing the user\n :type user: Union[UserCreate, UserSuperCreate]\n :return: Response object representing the created user in the\n database\n :rtype: UserCreateResponse\n \"\"\"\n try:\n created_user = await self._user_repo.create_user(user)\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n raise ServiceException(str(db_exc)) from db_exc\n return await model_to_response(\n created_user, UserCreateResponse # type: ignore\n )\n\n async def get_users(\n self, offset: Optional[NonNegativeInt], limit: Optional[PositiveInt]\n ) -> list[UserResponse]:\n \"\"\"\n Retrieve users' information from the table\n :param offset: Offset from where to start returning users\n :type offset: NonNegativeInt\n :param limit: Limit the number of results from query\n :type limit: PositiveInt\n :return: User information\n :rtype: list[UserResponse]\n \"\"\"\n try:\n users: list[User] = await self._user_repo.read_users(offset, limit)\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n raise ServiceException(str(db_exc)) from db_exc\n found_users: list[UserResponse] = [\n await model_to_response(user, UserResponse) # type: ignore\n for user in users\n ]\n return found_users\n\n async def update_user(\n self, user_id: UUID4, user: UserUpdate\n ) -> Optional[UserUpdateResponse]:\n \"\"\"\n Update user information from table\n :param user_id: Unique identifier of the user\n :type user_id: UUID4\n :param user: Requested user information to update\n :type user: UserUpdate\n :return: User information\n :rtype: Optional[UserUpdateResponse]\n \"\"\"\n try:\n updated_user: Optional[User] = await self._user_repo.update_user(\n IdSpecification(user_id), user\n )\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n raise ServiceException(str(db_exc)) from db_exc\n if not updated_user:\n raise ServiceException(\n f\"User with user_id: {user_id} could not be updated\"\n )\n return await model_to_response(\n updated_user, UserUpdateResponse # type: ignore\n )\n\n async def delete_user(self, user_id: UUID4) -> dict[str, Any]:\n \"\"\"\n Deletes a user by its id\n :param user_id: Unique identifier of the user\n :type user_id: UUID4\n :return: Data to confirmation info about the delete process\n :rtype: dict[str, Any]\n \"\"\"\n deleted: bool\n deleted_at: Optional[datetime]\n try:\n deleted = await self._user_repo.delete_user(\n IdSpecification(user_id)\n )\n deleted_at = datetime.now()\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n deleted = False\n deleted_at = None\n finally:\n return {\"ok\": deleted, \"deleted_at\": deleted_at}" }, { "identifier": "get_user_service", "path": "app/services/infrastructure/user.py", "snippet": "async def get_user_service(\n user_repo: 
Annotated[UserRepository, Depends(get_user_repository)],\n redis: Annotated[Redis, Depends(get_redis_dep)], # type: ignore\n) -> UserService:\n \"\"\"\n Get an instance of the user service with the given repository.\n :param user_repo: User repository object for database connection\n :type user_repo: UserRepository\n :param redis: Dependency method for async Redis connection\n :type redis: Redis\n :return: UserService instance with repository associated\n :rtype: UserService\n \"\"\"\n return UserService(user_repo, redis)" }, { "identifier": "send_password_changed_confirmation_email", "path": "app/tasks/email_tasks/email_tasks.py", "snippet": "@with_logging\nasync def send_password_changed_confirmation_email(\n email_to: EmailStr,\n username: str,\n init_settings: Annotated[InitSettings, Depends(get_init_settings)],\n settings: Annotated[Settings, Depends(get_settings)],\n) -> bool:\n \"\"\"\n Send a password changed confirmation email\n :param email_to: The email address of the recipient with password\n changed\n :type email_to: EmailStr\n :param username: Username of the recipient\n :type username: str\n :param init_settings: Dependency method for cached init setting object\n :type init_settings: InitSettings\n :param settings: Dependency method for cached setting object\n :type settings: Settings\n :return: True if the email was sent; otherwise false\n :rtype: bool\n \"\"\"\n subject: str = (\n f\"{init_settings.PASSWORD_CHANGED_CONFIRMATION_SUBJECT}\" f\" {username}\"\n )\n template_str: str = await build_email_template(\n \"password_changed_confirmation.html\", init_settings\n )\n is_sent: bool = await send_email(\n email_to=email_to,\n subject_template=subject,\n html_template=template_str,\n environment={\n \"project_name\": init_settings.PROJECT_NAME,\n \"username\": username,\n \"email\": email_to,\n \"link\": f\"mailto:{settings.CONTACT_EMAIL}?subject=\"\n f\"{init_settings.PROJECT_NAME} password changed\",\n },\n settings=settings,\n )\n return is_sent" }, { "identifier": "send_reset_password_email", "path": "app/tasks/email_tasks/email_tasks.py", "snippet": "@with_logging\nasync def send_reset_password_email(\n email_to: EmailStr,\n username: str,\n token: str,\n settings: Annotated[Settings, Depends(get_settings)],\n init_settings: Annotated[InitSettings, Depends(get_init_settings)],\n auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)],\n) -> bool:\n \"\"\"\n Sends a password reset email to a user with the given email address\n :param email_to: The email address of the user\n :type email_to: EmailStr\n :param username: The username of the user\n :type username: str\n :param token: The reset password token generated for the user\n :type token: str\n :param settings: Dependency method for cached setting object\n :type settings: Settings\n :param init_settings: Dependency method for cached init setting object\n :type init_settings: InitSettings\n :param auth_settings: Dependency method for cached setting object\n :type auth_settings: AuthSettings\n :return: True if the email was sent successfully; False otherwise\n :rtype: bool\n \"\"\"\n subject: str = (\n f\"{init_settings.PROJECT_NAME} -\"\n f\" {init_settings.PASSWORD_RECOVERY_SUBJECT} {username}\"\n )\n template_str: str = await build_email_template(\n \"reset_password.html\", init_settings\n )\n link: str = (\n f\"{auth_settings.SERVER_URL}\"\n f\"{auth_settings.AUTH_URL}reset-password?token={token}\"\n )\n is_sent: bool = await send_email(\n email_to=email_to,\n subject_template=subject,\n 
html_template=template_str,\n environment={\n \"project_name\": init_settings.PROJECT_NAME,\n \"username\": username,\n \"email\": email_to,\n \"valid_hours\": auth_settings.EMAIL_RESET_TOKEN_EXPIRE_HOURS,\n \"link\": link,\n },\n settings=settings,\n )\n return is_sent" }, { "identifier": "generate_password_reset_token", "path": "app/utils/security/password.py", "snippet": "def generate_password_reset_token(\n email: EmailStr,\n auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)],\n) -> str:\n \"\"\"\n Generate a password reset token for the given email address.\n :param email: The email to generate the reset token for\n :type email: EmailStr\n :param auth_settings: Dependency method for cached setting object\n :type auth_settings: AuthSettings\n :return: The password reset token\n :rtype: str\n \"\"\"\n payload: dict[str, Any] = generate_password_reset_payload(\n email, auth_settings\n )\n return encode_jwt(payload, auth_settings)" }, { "identifier": "verify_password_reset_token", "path": "app/utils/security/password.py", "snippet": "def verify_password_reset_token(\n token: str,\n auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)],\n) -> Optional[EmailStr]:\n \"\"\"\n Verify a password reset token and return the email address if valid.\n :param token: The JSON Web Token\n :type token: str\n :param auth_settings: Dependency method for cached setting object\n :type auth_settings: AuthSettings\n :return: The email address\n :rtype: EmailStr\n \"\"\"\n decoded_token: Optional[dict[str, Any]] = decode_jwt(token, auth_settings)\n return decoded_token.get(\"sub\") if decoded_token else None" } ]
import logging from typing import Annotated, Any, Optional from fastapi import ( APIRouter, Body, Depends, Header, HTTPException, Path, Request, status, ) from fastapi.security import OAuth2PasswordRequestForm from pydantic import EmailStr from redis.asyncio import Redis from starlette.datastructures import Address from app.api.deps import get_redis_dep from app.api.oauth2_validation import get_current_user, get_refresh_current_user from app.config.config import ( get_auth_settings, get_init_settings, get_settings, init_setting, ) from app.config.db.auth_settings import AuthSettings from app.config.init_settings import InitSettings from app.config.settings import Settings from app.core.security.password import verify_password from app.exceptions.exceptions import NotFoundException, ServiceException from app.models.sql.user import User as UserDB from app.schemas.external.msg import Msg from app.schemas.external.token import TokenResetPassword, TokenResponse from app.schemas.external.user import ( UserResponse, UserUpdate, UserUpdateResponse, ) from app.schemas.infrastructure.user import UserAuth from app.services.infrastructure.auth import common_auth_procedure from app.services.infrastructure.token import TokenService from app.services.infrastructure.user import UserService, get_user_service from app.tasks.email_tasks.email_tasks import ( send_password_changed_confirmation_email, send_reset_password_email, ) from app.utils.security.password import ( generate_password_reset_token, verify_password_reset_token, )
13,927
- `type:` **OAuth2PasswordRequestForm** ## Response: - `return:` **Token information with access token, its type and refresh token** - `rtype:` **TokenResponse** \f :param request: Request object for client host information :type request: Request :param user_service: Dependency method for User Service :type user_service: UserService :param auth_settings: Dependency method for cached setting object :type auth_settings: AuthSettings :param redis: Dependency method for async Redis connection :type redis: Redis """ client: Optional[Address] = request.client if not client: raise NotFoundException(auth_settings.NO_CLIENT_FOUND) client_ip: str = client.host try: found_user: UserDB = await user_service.get_login_user(user.username) except ServiceException as exc: logger.error(exc) raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail="Invalid credentials" ) from exc if not verify_password(found_user.password, user.password): detail: str = "Incorrect password" logger.warning(detail) raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail=detail ) if not found_user.is_active: user_detail: str = "Inactive user" logger.warning(user_detail) raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail=user_detail ) return await common_auth_procedure( found_user, client_ip, redis, auth_settings ) @router.post( "/refresh", response_model=TokenResponse, status_code=status.HTTP_201_CREATED, ) async def refresh_token( request: Request, user_service: Annotated[UserService, Depends(get_user_service)], auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)], refresh_current_user: Annotated[ UserAuth, Depends(get_refresh_current_user) ], redis: Annotated[Redis, Depends(get_redis_dep)], # type: ignore ) -> TokenResponse: """ Generates a refresh token for the current user and saves it to the database ## Response: - `return:` **Token information with access token, its type and refresh token** - `rtype:` **TokenResponse** \f :param request: The HTTP request on the server :type request: Request :param user_service: Dependency method for User Service :type user_service: UserService :param auth_settings: Dependency method for cached setting object :type auth_settings: AuthSettings :param refresh_current_user: The current user dependency for refresh token :type refresh_current_user: UserAuth :param redis: Dependency method for async Redis connection :type redis: Redis """ client: Optional[Address] if not (client := request.client): raise NotFoundException(auth_settings.NO_CLIENT_FOUND) client_ip: str = client.host try: user: UserDB = await user_service.get_login_user( refresh_current_user.username ) except ServiceException as exc: detail: str = "Can not found user information." logger.error(detail) raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail=detail ) from exc return await common_auth_procedure(user, client_ip, redis, auth_settings) @router.post("/validate-token", response_model=UserAuth) async def validate_token( current_user: Annotated[UserAuth, Depends(get_current_user)] ) -> UserAuth: """ Endpoint to validate an access token. 
## Response: - `return:` **The authenticated user instance** - `rtype:` **UserAuth** \f :param current_user: The current user :type current_user: UserAuth """ return current_user @router.post("/recover-password/{email}", response_model=Msg) async def recover_password( settings: Annotated[Settings, Depends(get_settings)], auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)], email: Annotated[ EmailStr, Path( ..., title="Email", description="The email used to recover the password", example={"email": "[email protected]"},
""" Authentication API Router. This module provides login and password recovery functionality. """ logger: logging.Logger = logging.getLogger(__name__) router: APIRouter = APIRouter(prefix="/auth", tags=["auth"]) @router.post("/login", response_model=TokenResponse) async def login( request: Request, auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)], user: Annotated[OAuth2PasswordRequestForm, Depends()], user_service: Annotated[UserService, Depends(get_user_service)], redis: Annotated[Redis, Depends(get_redis_dep)], # type: ignore ) -> TokenResponse: """ Endpoint to handle user login with OAuth2 authentication using request form. ## Parameter: - `user:` **Request body with username and password** - `type:` **OAuth2PasswordRequestForm** ## Response: - `return:` **Token information with access token, its type and refresh token** - `rtype:` **TokenResponse** \f :param request: Request object for client host information :type request: Request :param user_service: Dependency method for User Service :type user_service: UserService :param auth_settings: Dependency method for cached setting object :type auth_settings: AuthSettings :param redis: Dependency method for async Redis connection :type redis: Redis """ client: Optional[Address] = request.client if not client: raise NotFoundException(auth_settings.NO_CLIENT_FOUND) client_ip: str = client.host try: found_user: UserDB = await user_service.get_login_user(user.username) except ServiceException as exc: logger.error(exc) raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail="Invalid credentials" ) from exc if not verify_password(found_user.password, user.password): detail: str = "Incorrect password" logger.warning(detail) raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail=detail ) if not found_user.is_active: user_detail: str = "Inactive user" logger.warning(user_detail) raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail=user_detail ) return await common_auth_procedure( found_user, client_ip, redis, auth_settings ) @router.post( "/refresh", response_model=TokenResponse, status_code=status.HTTP_201_CREATED, ) async def refresh_token( request: Request, user_service: Annotated[UserService, Depends(get_user_service)], auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)], refresh_current_user: Annotated[ UserAuth, Depends(get_refresh_current_user) ], redis: Annotated[Redis, Depends(get_redis_dep)], # type: ignore ) -> TokenResponse: """ Generates a refresh token for the current user and saves it to the database ## Response: - `return:` **Token information with access token, its type and refresh token** - `rtype:` **TokenResponse** \f :param request: The HTTP request on the server :type request: Request :param user_service: Dependency method for User Service :type user_service: UserService :param auth_settings: Dependency method for cached setting object :type auth_settings: AuthSettings :param refresh_current_user: The current user dependency for refresh token :type refresh_current_user: UserAuth :param redis: Dependency method for async Redis connection :type redis: Redis """ client: Optional[Address] if not (client := request.client): raise NotFoundException(auth_settings.NO_CLIENT_FOUND) client_ip: str = client.host try: user: UserDB = await user_service.get_login_user( refresh_current_user.username ) except ServiceException as exc: detail: str = "Can not found user information." 
logger.error(detail) raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail=detail ) from exc return await common_auth_procedure(user, client_ip, redis, auth_settings) @router.post("/validate-token", response_model=UserAuth) async def validate_token( current_user: Annotated[UserAuth, Depends(get_current_user)] ) -> UserAuth: """ Endpoint to validate an access token. ## Response: - `return:` **The authenticated user instance** - `rtype:` **UserAuth** \f :param current_user: The current user :type current_user: UserAuth """ return current_user @router.post("/recover-password/{email}", response_model=Msg) async def recover_password( settings: Annotated[Settings, Depends(get_settings)], auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)], email: Annotated[ EmailStr, Path( ..., title="Email", description="The email used to recover the password", example={"email": "[email protected]"},
openapi_examples=init_setting.EMAIL_BODY_EXAMPLES,
3
2023-11-17 00:32:32+00:00
16k
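The row that ends here shows the shape every record in this dump follows: a list of retrieved context snippets (each with an identifier, its source path, and the snippet text), the target file's import block, a truncated code prefix, the full file contents, the expected next line, and a handful of metadata values (a token count, a small snippet index, a timestamp, and a context-length bucket such as 16k). As a minimal sketch of how such a row could be consumed — assuming the truncated prefix is meant to be continued directly by the stored next line, and assuming a row is available as a plain Python dict whose key names ("context", "import_statement", "cropped_code", "next_line") are chosen here purely for illustration — something like the following would assemble a completion prompt and score a prediction; the helper names are hypothetical, not part of any dataset tooling:

    # Minimal sketch under the assumptions stated above; `row` is a plain dict
    # whose keys mirror the fields of one record in this dump.
    def build_prompt(row: dict, max_context_snippets: int = 3) -> str:
        """Concatenate retrieved context snippets with the target file's prefix."""
        parts = []
        for entry in row["context"][:max_context_snippets]:
            # each entry carries a symbol name, its source path, and the snippet text
            parts.append(f"# {entry['path']} :: {entry['identifier']}\n{entry['snippet']}")
        # assumption: the truncated prefix follows the file's import block and is
        # meant to be continued by the ground-truth next line
        parts.append(row["import_statement"])
        parts.append(row["cropped_code"])
        return "\n\n".join(parts)

    def exact_match(row: dict, predicted_line: str) -> bool:
        """Compare a predicted continuation against the stored ground-truth line."""
        return predicted_line.strip() == row["next_line"].strip()

The small integer stored alongside the next line (3 in the record above) presumably indexes the context snippet most relevant to the completion, so it could serve as the gold retrieval target when evaluating a retriever; that reading is an assumption, not something stated in the dump itself.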
vitant-lang/CBAM-ASPP
train.py
[ { "identifier": "DeepLab", "path": "nets/deeplabv3_plus.py", "snippet": "class DeepLab(nn.Module):\n\tdef __init__(self, num_classes, backbone=\"mobilenet\", pretrained=True, downsample_factor=16):\n\t\tsuper(DeepLab, self).__init__()\n\t\tif backbone==\"xception\":\n\t\t\t#----------------------------------#\n\t\t\t# 获得两个特征层\n\t\t\t# 浅层特征 [128,128,256]\n\t\t\t# 主干部分 [30,30,2048]\n\t\t\t#----------------------------------#\n\t\t\tself.backbone = xception(downsample_factor=downsample_factor, pretrained=pretrained)\n\t\t\tin_channels = 2048\n\t\t\tlow_level_channels = 256\n\t\telif backbone==\"mobilenet\":\n\t\t\t#----------------------------------#\n\t\t\t# 获得两个特征层\n\t\t\t# 浅层特征 [128,128,24]\n\t\t\t# 主干部分 [30,30,320]\n\t\t\t#----------------------------------#\n\t\t\tself.backbone = MobileNetV2(downsample_factor=downsample_factor, pretrained=pretrained)\n\t\t\tin_channels = 320\n\t\t\tlow_level_channels = 24\n\t\telse:\n\t\t\traise ValueError('Unsupported backbone - `{}`, Use mobilenet, xception.'.format(backbone))\n\n\t\t#-----------------------------------------#\n\t\t# ASPP特征提取模块\n\t\t# 利用不同膨胀率的膨胀卷积进行特征提取\n\t\t#-----------------------------------------#\n\t\tself.aspp = ASPP(dim_in=in_channels, dim_out=256, rate=16//downsample_factor)\n\n\t\t#----------------------------------#\n\t\t# 浅层特征边\n\t\t#----------------------------------#\n\t\tself.shortcut_conv = nn.Sequential(\n\t\t\tnn.Conv2d(low_level_channels, 48, 1),\n\t\t\tnn.BatchNorm2d(48),\n\t\t\tnn.ReLU(inplace=True)\n\t\t)\n\n\t\tself.cat_conv = nn.Sequential(\n\t\t\tnn.Conv2d(48+256, 256, 3, stride=1, padding=1),\n\t\t\tnn.BatchNorm2d(256),\n\t\t\tnn.ReLU(inplace=True),\n\t\t\tnn.Dropout(0.5),\n\n\t\t\tnn.Conv2d(256, 256, 3, stride=1, padding=1),\n\t\t\tnn.BatchNorm2d(256),\n\t\t\tnn.ReLU(inplace=True),\n\n\t\t\tnn.Dropout(0.1),\n\t\t)\n\t\tself.cls_conv = nn.Conv2d(256, num_classes, 1, stride=1)\n\n\tdef forward(self, x):\n\t\tH, W = x.size(2), x.size(3)\n\t\t#-----------------------------------------#\n\t\t# 获得两个特征层\n\t\t# low_level_features: 浅层特征-进行卷积处理\n\t\t# x : 主干部分-利用ASPP结构进行加强特征提取\n\t\t#-----------------------------------------#\n\t\tlow_level_features, x = self.backbone(x)\n\n\n\t\tx = self.aspp(x)\n\t\tlow_level_features = self.shortcut_conv(low_level_features)\n\n\t\t#-----------------------------------------#\n\t\t# 将加强特征边上采样\n\t\t# 与浅层特征堆叠后利用卷积进行特征提取\n\t\t#-----------------------------------------#\n\t\tx = F.interpolate(x, size=(low_level_features.size(2), low_level_features.size(3)), mode='bilinear', align_corners=True)\n\t\tx = self.cat_conv(torch.cat((x, low_level_features), dim=1))\n\t\tx = self.cls_conv(x)\n\t\tx = F.interpolate(x, size=(H, W), mode='bilinear', align_corners=True)\n\t\treturn x" }, { "identifier": "get_lr_scheduler", "path": "nets/deeplabv3_training.py", "snippet": "def get_lr_scheduler(lr_decay_type, lr, min_lr, total_iters, warmup_iters_ratio = 0.1, warmup_lr_ratio = 0.1, no_aug_iter_ratio = 0.3, step_num = 10):\n def yolox_warm_cos_lr(lr, min_lr, total_iters, warmup_total_iters, warmup_lr_start, no_aug_iter, iters):\n if iters <= warmup_total_iters:\n # lr = (lr - warmup_lr_start) * iters / float(warmup_total_iters) + warmup_lr_start\n lr = (lr - warmup_lr_start) * pow(iters / float(warmup_total_iters), 2) + warmup_lr_start\n elif iters >= total_iters - no_aug_iter:\n lr = min_lr\n else:\n lr = min_lr + 0.5 * (lr - min_lr) * (\n 1.0 + math.cos(math.pi* (iters - warmup_total_iters) / (total_iters - warmup_total_iters - no_aug_iter))\n )\n return lr\n\n def step_lr(lr, decay_rate, step_size, 
iters):\n if step_size < 1:\n raise ValueError(\"step_size must above 1.\")\n n = iters // step_size\n out_lr = lr * decay_rate ** n\n return out_lr\n\n if lr_decay_type == \"cos\":\n warmup_total_iters = min(max(warmup_iters_ratio * total_iters, 1), 3)\n warmup_lr_start = max(warmup_lr_ratio * lr, 1e-6)\n no_aug_iter = min(max(no_aug_iter_ratio * total_iters, 1), 15)\n func = partial(yolox_warm_cos_lr ,lr, min_lr, total_iters, warmup_total_iters, warmup_lr_start, no_aug_iter)\n else:\n decay_rate = (min_lr / lr) ** (1 / (step_num - 1))\n step_size = total_iters / step_num\n func = partial(step_lr, lr, decay_rate, step_size)\n\n return func" }, { "identifier": "set_optimizer_lr", "path": "nets/deeplabv3_training.py", "snippet": "def set_optimizer_lr(optimizer, lr_scheduler_func, epoch):\n lr = lr_scheduler_func(epoch)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr" }, { "identifier": "weights_init", "path": "nets/deeplabv3_training.py", "snippet": "def weights_init(net, init_type='normal', init_gain=0.02):\n def init_func(m):\n classname = m.__class__.__name__\n if hasattr(m, 'weight') and classname.find('Conv') != -1:\n if init_type == 'normal':\n torch.nn.init.normal_(m.weight.data, 0.0, init_gain)\n elif init_type == 'xavier':\n torch.nn.init.xavier_normal_(m.weight.data, gain=init_gain)\n elif init_type == 'kaiming':\n torch.nn.init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')\n elif init_type == 'orthogonal':\n torch.nn.init.orthogonal_(m.weight.data, gain=init_gain)\n else:\n raise NotImplementedError('initialization method [%s] is not implemented' % init_type)\n elif classname.find('BatchNorm2d') != -1:\n torch.nn.init.normal_(m.weight.data, 1.0, 0.02)\n torch.nn.init.constant_(m.bias.data, 0.0)\n print('initialize network with %s type' % init_type)\n net.apply(init_func)" }, { "identifier": "LossHistory", "path": "utils/callbacks.py", "snippet": "class LossHistory():\n def __init__(self, log_dir, model, input_shape):\n self.log_dir = log_dir\n self.losses = []\n self.val_loss = []\n \n os.makedirs(self.log_dir)\n self.writer = SummaryWriter(self.log_dir)\n try:\n dummy_input = torch.randn(2, 3, input_shape[0], input_shape[1])\n self.writer.add_graph(model, dummy_input)\n except:\n pass\n\n def append_loss(self, epoch, loss, val_loss):\n if not os.path.exists(self.log_dir):\n os.makedirs(self.log_dir)\n\n self.losses.append(loss)\n self.val_loss.append(val_loss)\n\n with open(os.path.join(self.log_dir, \"epoch_loss.txt\"), 'a') as f:\n f.write(str(loss))\n f.write(\"\\n\")\n with open(os.path.join(self.log_dir, \"epoch_val_loss.txt\"), 'a') as f:\n f.write(str(val_loss))\n f.write(\"\\n\")\n\n self.writer.add_scalar('loss', loss, epoch)\n self.writer.add_scalar('val_loss', val_loss, epoch)\n self.loss_plot()\n\n def loss_plot(self):\n iters = range(len(self.losses))\n\n plt.figure()\n plt.plot(iters, self.losses, 'red', linewidth = 2, label='train loss')\n plt.plot(iters, self.val_loss, 'coral', linewidth = 2, label='val loss')\n try:\n if len(self.losses) < 25:\n num = 5\n else:\n num = 15\n \n plt.plot(iters, scipy.signal.savgol_filter(self.losses, num, 3), 'green', linestyle = '--', linewidth = 2, label='smooth train loss')\n plt.plot(iters, scipy.signal.savgol_filter(self.val_loss, num, 3), '#8B4513', linestyle = '--', linewidth = 2, label='smooth val loss')\n except:\n pass\n\n plt.grid(True)\n plt.xlabel('Epoch')\n plt.ylabel('Loss')\n plt.legend(loc=\"upper right\")\n\n plt.savefig(os.path.join(self.log_dir, \"epoch_loss.png\"))\n\n plt.cla()\n 
plt.close(\"all\")" }, { "identifier": "EvalCallback", "path": "utils/callbacks.py", "snippet": "class EvalCallback():\n def __init__(self, net, input_shape, num_classes, image_ids, dataset_path, log_dir, cuda, \\\n miou_out_path=\".temp_miou_out\", eval_flag=True, period=1):\n super(EvalCallback, self).__init__()\n \n self.net = net\n self.input_shape = input_shape\n self.num_classes = num_classes\n self.image_ids = image_ids\n self.dataset_path = dataset_path\n self.log_dir = log_dir\n self.cuda = cuda\n self.miou_out_path = miou_out_path\n self.eval_flag = eval_flag\n self.period = period\n \n self.image_ids = [image_id.split()[0] for image_id in image_ids]\n self.mious = [0]\n self.epoches = [0]\n if self.eval_flag:\n with open(os.path.join(self.log_dir, \"epoch_miou.txt\"), 'a') as f:\n f.write(str(0))\n f.write(\"\\n\")\n\n def get_miou_png(self, image):\n #---------------------------------------------------------#\n # 在这里将图像转换成RGB图像,防止灰度图在预测时报错。\n # 代码仅仅支持RGB图像的预测,所有其它类型的图像都会转化成RGB\n #---------------------------------------------------------#\n image = cvtColor(image)\n orininal_h = np.array(image).shape[0]\n orininal_w = np.array(image).shape[1]\n #---------------------------------------------------------#\n # 给图像增加灰条,实现不失真的resize\n # 也可以直接resize进行识别\n #---------------------------------------------------------#\n image_data, nw, nh = resize_image(image, (self.input_shape[1],self.input_shape[0]))\n #---------------------------------------------------------#\n # 添加上batch_size维度\n #---------------------------------------------------------#\n image_data = np.expand_dims(np.transpose(preprocess_input(np.array(image_data, np.float32)), (2, 0, 1)), 0)\n\n with torch.no_grad():\n images = torch.from_numpy(image_data)\n if self.cuda:\n images = images.cuda()\n \n #---------------------------------------------------#\n # 图片传入网络进行预测\n #---------------------------------------------------#\n pr = self.net(images)[0]\n #---------------------------------------------------#\n # 取出每一个像素点的种类\n #---------------------------------------------------#\n pr = F.softmax(pr.permute(1,2,0),dim = -1).cpu().numpy()\n #--------------------------------------#\n # 将灰条部分截取掉\n #--------------------------------------#\n pr = pr[int((self.input_shape[0] - nh) // 2) : int((self.input_shape[0] - nh) // 2 + nh), \\\n int((self.input_shape[1] - nw) // 2) : int((self.input_shape[1] - nw) // 2 + nw)]\n #---------------------------------------------------#\n # 进行图片的resize\n #---------------------------------------------------#\n pr = cv2.resize(pr, (orininal_w, orininal_h), interpolation = cv2.INTER_LINEAR)\n #---------------------------------------------------#\n # 取出每一个像素点的种类\n #---------------------------------------------------#\n pr = pr.argmax(axis=-1)\n \n image = Image.fromarray(np.uint8(pr))\n return image\n \n def on_epoch_end(self, epoch, model_eval):\n if epoch % self.period == 0 and self.eval_flag:\n self.net = model_eval\n gt_dir = os.path.join(self.dataset_path, \"VOC2007/SegmentationClass/\")\n pred_dir = os.path.join(self.miou_out_path, 'detection-results')\n if not os.path.exists(self.miou_out_path):\n os.makedirs(self.miou_out_path)\n if not os.path.exists(pred_dir):\n os.makedirs(pred_dir)\n print(\"Get miou.\")\n for image_id in tqdm(self.image_ids):\n #-------------------------------#\n # 从文件中读取图像\n #-------------------------------#\n image_path = os.path.join(self.dataset_path, \"VOC2007/JPEGImages/\"+image_id+\".jpg\")\n image = Image.open(image_path)\n #------------------------------#\n # 获得预测txt\n 
#------------------------------#\n image = self.get_miou_png(image)\n image.save(os.path.join(pred_dir, image_id + \".png\"))\n \n print(\"Calculate miou.\")\n _, IoUs, _, _ = compute_mIoU(gt_dir, pred_dir, self.image_ids, self.num_classes, None) # 执行计算mIoU的函数\n temp_miou = np.nanmean(IoUs) * 100\n\n self.mious.append(temp_miou)\n self.epoches.append(epoch)\n\n with open(os.path.join(self.log_dir, \"epoch_miou.txt\"), 'a') as f:\n f.write(str(temp_miou))\n f.write(\"\\n\")\n \n plt.figure()\n plt.plot(self.epoches, self.mious, 'red', linewidth = 2, label='train miou')\n\n plt.grid(True)\n plt.xlabel('Epoch')\n plt.ylabel('Miou')\n plt.title('A Miou Curve')\n plt.legend(loc=\"upper right\")\n\n plt.savefig(os.path.join(self.log_dir, \"epoch_miou.png\"))\n plt.cla()\n plt.close(\"all\")\n\n print(\"Get miou done.\")\n shutil.rmtree(self.miou_out_path)" }, { "identifier": "DeeplabDataset", "path": "utils/dataloader.py", "snippet": "class DeeplabDataset(Dataset):\n def __init__(self, annotation_lines, input_shape, num_classes, train, dataset_path):\n super(DeeplabDataset, self).__init__()\n self.annotation_lines = annotation_lines\n self.length = len(annotation_lines)\n self.input_shape = input_shape\n self.num_classes = num_classes\n self.train = train\n self.dataset_path = dataset_path\n\n def __len__(self):\n return self.length\n\n def __getitem__(self, index):\n annotation_line = self.annotation_lines[index]\n name = annotation_line.split()[0]\n\n #-------------------------------#\n # 从文件中读取图像\n #-------------------------------#\n jpg = Image.open(os.path.join(os.path.join(self.dataset_path, \"VOC2007/JPEGImages\"), name + \".jpg\"))\n png = Image.open(os.path.join(os.path.join(self.dataset_path, \"VOC2007/SegmentationClass\"), name + \".png\"))\n #-------------------------------#\n # 数据增强\n #-------------------------------#\n jpg, png = self.get_random_data(jpg, png, self.input_shape, random = self.train)\n\n jpg = np.transpose(preprocess_input(np.array(jpg, np.float64)), [2,0,1])\n png = np.array(png)\n png[png >= self.num_classes] = self.num_classes\n #-------------------------------------------------------#\n # 转化成one_hot的形式\n # 在这里需要+1是因为voc数据集有些标签具有白边部分\n # 我们需要将白边部分进行忽略,+1的目的是方便忽略。\n #-------------------------------------------------------#\n seg_labels = np.eye(self.num_classes + 1)[png.reshape([-1])]\n seg_labels = seg_labels.reshape((int(self.input_shape[0]), int(self.input_shape[1]), self.num_classes + 1))\n\n return jpg, png, seg_labels\n\n def rand(self, a=0, b=1):\n return np.random.rand() * (b - a) + a\n\n def get_random_data(self, image, label, input_shape, jitter=.3, hue=.1, sat=0.7, val=0.3, random=True):\n image = cvtColor(image)\n label = Image.fromarray(np.array(label))\n #------------------------------#\n # 获得图像的高宽与目标高宽\n #------------------------------#\n iw, ih = image.size\n h, w = input_shape\n\n if not random:\n iw, ih = image.size\n scale = min(w/iw, h/ih)\n nw = int(iw*scale)\n nh = int(ih*scale)\n\n image = image.resize((nw,nh), Image.BICUBIC)\n new_image = Image.new('RGB', [w, h], (128,128,128))\n new_image.paste(image, ((w-nw)//2, (h-nh)//2))\n\n label = label.resize((nw,nh), Image.NEAREST)\n new_label = Image.new('L', [w, h], (0))\n new_label.paste(label, ((w-nw)//2, (h-nh)//2))\n return new_image, new_label\n\n #------------------------------------------#\n # 对图像进行缩放并且进行长和宽的扭曲\n #------------------------------------------#\n new_ar = iw/ih * self.rand(1-jitter,1+jitter) / self.rand(1-jitter,1+jitter)\n scale = self.rand(0.25, 2)\n if new_ar < 1:\n nh = 
int(scale*h)\n nw = int(nh*new_ar)\n else:\n nw = int(scale*w)\n nh = int(nw/new_ar)\n image = image.resize((nw,nh), Image.BICUBIC)\n label = label.resize((nw,nh), Image.NEAREST)\n \n #------------------------------------------#\n # 翻转图像\n #------------------------------------------#\n flip = self.rand()<.5\n if flip: \n image = image.transpose(Image.FLIP_LEFT_RIGHT)\n label = label.transpose(Image.FLIP_LEFT_RIGHT)\n \n #------------------------------------------#\n # 将图像多余的部分加上灰条\n #------------------------------------------#\n dx = int(self.rand(0, w-nw))\n dy = int(self.rand(0, h-nh))\n new_image = Image.new('RGB', (w,h), (128,128,128))\n new_label = Image.new('L', (w,h), (0))\n new_image.paste(image, (dx, dy))\n new_label.paste(label, (dx, dy))\n image = new_image\n label = new_label\n\n image_data = np.array(image, np.uint8)\n\n #------------------------------------------#\n # 高斯模糊\n #------------------------------------------#\n blur = self.rand() < 0.25\n if blur: \n image_data = cv2.GaussianBlur(image_data, (5, 5), 0)\n\n #------------------------------------------#\n # 旋转\n #------------------------------------------#\n rotate = self.rand() < 0.25\n if rotate: \n center = (w // 2, h // 2)\n rotation = np.random.randint(-10, 11)\n M = cv2.getRotationMatrix2D(center, -rotation, scale=1)\n image_data = cv2.warpAffine(image_data, M, (w, h), flags=cv2.INTER_CUBIC, borderValue=(128,128,128))\n label = cv2.warpAffine(np.array(label, np.uint8), M, (w, h), flags=cv2.INTER_NEAREST, borderValue=(0))\n\n #---------------------------------#\n # 对图像进行色域变换\n # 计算色域变换的参数\n #---------------------------------#\n r = np.random.uniform(-1, 1, 3) * [hue, sat, val] + 1\n #---------------------------------#\n # 将图像转到HSV上\n #---------------------------------#\n hue, sat, val = cv2.split(cv2.cvtColor(image_data, cv2.COLOR_RGB2HSV))\n dtype = image_data.dtype\n #---------------------------------#\n # 应用变换\n #---------------------------------#\n x = np.arange(0, 256, dtype=r.dtype)\n lut_hue = ((x * r[0]) % 180).astype(dtype)\n lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)\n lut_val = np.clip(x * r[2], 0, 255).astype(dtype)\n\n image_data = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val)))\n image_data = cv2.cvtColor(image_data, cv2.COLOR_HSV2RGB)\n \n return image_data, label" }, { "identifier": "deeplab_dataset_collate", "path": "utils/dataloader.py", "snippet": "def deeplab_dataset_collate(batch):\n images = []\n pngs = []\n seg_labels = []\n for img, png, labels in batch:\n images.append(img)\n pngs.append(png)\n seg_labels.append(labels)\n images = torch.from_numpy(np.array(images)).type(torch.FloatTensor)\n pngs = torch.from_numpy(np.array(pngs)).long()\n seg_labels = torch.from_numpy(np.array(seg_labels)).type(torch.FloatTensor)\n return images, pngs, seg_labels" }, { "identifier": "download_weights", "path": "utils/utils.py", "snippet": "def download_weights(backbone, model_dir=\"./model_data\"):\n import os\n from torch.hub import load_state_dict_from_url\n \n download_urls = {\n 'mobilenet' : 'https://github.com/bubbliiiing/deeplabv3-plus-pytorch/releases/download/v1.0/mobilenet_v2.pth.tar',\n 'xception' : 'https://github.com/bubbliiiing/deeplabv3-plus-pytorch/releases/download/v1.0/xception_pytorch_imagenet.pth',\n }\n url = download_urls[backbone]\n \n if not os.path.exists(model_dir):\n os.makedirs(model_dir)\n load_state_dict_from_url(url, model_dir)" }, { "identifier": "show_config", "path": "utils/utils.py", "snippet": "def show_config(**kwargs):\n 
print('Configurations:')\n print('-' * 70)\n print('|%25s | %40s|' % ('keys', 'values'))\n print('-' * 70)\n for key, value in kwargs.items():\n print('|%25s | %40s|' % (str(key), str(value)))\n print('-' * 70)" }, { "identifier": "fit_one_epoch", "path": "utils/utils_fit.py", "snippet": "def fit_one_epoch(model_train, model, loss_history, eval_callback, optimizer, epoch, epoch_step, epoch_step_val, gen, gen_val, Epoch, cuda, dice_loss, focal_loss, cls_weights, num_classes, \\\n fp16, scaler, save_period, save_dir, local_rank=0):\n total_loss = 0\n total_f_score = 0\n\n val_loss = 0\n val_f_score = 0\n\n if local_rank == 0:\n print('Start Train')\n pbar = tqdm(total=epoch_step,desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3)\n model_train.train()\n for iteration, batch in enumerate(gen):\n if iteration >= epoch_step: \n break\n imgs, pngs, labels = batch\n\n with torch.no_grad():\n weights = torch.from_numpy(cls_weights)\n if cuda:\n imgs = imgs.cuda(local_rank)\n pngs = pngs.cuda(local_rank)\n labels = labels.cuda(local_rank)\n weights = weights.cuda(local_rank)\n #----------------------#\n # 清零梯度\n #----------------------#\n optimizer.zero_grad()\n if not fp16:\n #----------------------#\n # 前向传播\n #----------------------#\n outputs = model_train(imgs)\n #----------------------#\n # 计算损失\n #----------------------#\n if focal_loss:\n loss = Focal_Loss(outputs, pngs, weights, num_classes = num_classes)\n else:\n loss = CE_Loss(outputs, pngs, weights, num_classes = num_classes)\n\n if dice_loss:\n main_dice = Dice_loss(outputs, labels)\n loss = loss + main_dice\n\n with torch.no_grad():\n #-------------------------------#\n # 计算f_score\n #-------------------------------#\n _f_score = f_score(outputs, labels)\n\n #----------------------#\n # 反向传播\n #----------------------#\n loss.backward()\n optimizer.step()\n else:\n from torch.cuda.amp import autocast\n with autocast():\n #----------------------#\n # 前向传播\n #----------------------#\n outputs = model_train(imgs)\n #----------------------#\n # 计算损失\n #----------------------#\n if focal_loss:\n loss = Focal_Loss(outputs, pngs, weights, num_classes = num_classes)\n else:\n loss = CE_Loss(outputs, pngs, weights, num_classes = num_classes)\n\n if dice_loss:\n main_dice = Dice_loss(outputs, labels)\n loss = loss + main_dice\n\n with torch.no_grad():\n #-------------------------------#\n # 计算f_score\n #-------------------------------#\n _f_score = f_score(outputs, labels)\n \n #----------------------#\n # 反向传播\n #----------------------#\n scaler.scale(loss).backward()\n scaler.step(optimizer)\n scaler.update()\n\n total_loss += loss.item()\n total_f_score += _f_score.item()\n \n if local_rank == 0:\n pbar.set_postfix(**{'total_loss': total_loss / (iteration + 1), \n 'f_score' : total_f_score / (iteration + 1),\n 'lr' : get_lr(optimizer)})\n pbar.update(1)\n\n if local_rank == 0:\n pbar.close()\n print('Finish Train')\n print('Start Validation')\n pbar = tqdm(total=epoch_step_val, desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3)\n\n model_train.eval()\n for iteration, batch in enumerate(gen_val):\n if iteration >= epoch_step_val:\n break\n imgs, pngs, labels = batch\n with torch.no_grad():\n weights = torch.from_numpy(cls_weights)\n if cuda:\n imgs = imgs.cuda(local_rank)\n pngs = pngs.cuda(local_rank)\n labels = labels.cuda(local_rank)\n weights = weights.cuda(local_rank)\n\n #----------------------#\n # 前向传播\n #----------------------#\n outputs = model_train(imgs)\n #----------------------#\n # 计算损失\n 
#----------------------#\n if focal_loss:\n loss = Focal_Loss(outputs, pngs, weights, num_classes = num_classes)\n else:\n loss = CE_Loss(outputs, pngs, weights, num_classes = num_classes)\n\n if dice_loss:\n main_dice = Dice_loss(outputs, labels)\n loss = loss + main_dice\n #-------------------------------#\n # 计算f_score\n #-------------------------------#\n _f_score = f_score(outputs, labels)\n\n val_loss += loss.item()\n val_f_score += _f_score.item()\n \n if local_rank == 0:\n pbar.set_postfix(**{'val_loss' : val_loss / (iteration + 1),\n 'f_score' : val_f_score / (iteration + 1),\n 'lr' : get_lr(optimizer)})\n pbar.update(1)\n \n if local_rank == 0:\n pbar.close()\n print('Finish Validation')\n loss_history.append_loss(epoch + 1, total_loss / epoch_step, val_loss / epoch_step_val)\n eval_callback.on_epoch_end(epoch + 1, model_train)\n print('Epoch:'+ str(epoch + 1) + '/' + str(Epoch))\n print('Total Loss: %.3f || Val Loss: %.3f ' % (total_loss / epoch_step, val_loss / epoch_step_val))\n \n #-----------------------------------------------#\n # 保存权值\n #-----------------------------------------------#\n if (epoch + 1) % save_period == 0 or epoch + 1 == Epoch:\n torch.save(model.state_dict(), os.path.join(save_dir, 'ep%03d-loss%.3f-val_loss%.3f.pth' % (epoch + 1, total_loss / epoch_step, val_loss / epoch_step_val)))\n\n if len(loss_history.val_loss) <= 1 or (val_loss / epoch_step_val) <= min(loss_history.val_loss):\n print('Save best model to best_epoch_weights.pth')\n torch.save(model.state_dict(), os.path.join(save_dir, \"best_epoch_weights.pth\"))\n \n torch.save(model.state_dict(), os.path.join(save_dir, \"last_epoch_weights.pth\"))" } ]
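A minimal usage sketch (not part of the quoted repository; the dataset paths, split file and loader settings are assumptions, only the DeeplabDataset and deeplab_dataset_collate signatures follow the snippets quoted above) showing how the dataset and custom collate function are typically wired into a PyTorch DataLoader:

from torch.utils.data import DataLoader
from utils.dataloader import DeeplabDataset, deeplab_dataset_collate

VOCdevkit_path = "VOCdevkit"        # assumed VOC-style dataset root
input_shape = [512, 512]
num_classes = 3

# Assumed train split file listing image ids, one per line
with open("VOCdevkit/VOC2007/ImageSets/Segmentation/train.txt") as f:
    train_lines = f.readlines()

train_dataset = DeeplabDataset(train_lines, input_shape, num_classes,
                               train=True, dataset_path=VOCdevkit_path)
gen = DataLoader(train_dataset, batch_size=8, shuffle=True, num_workers=4,
                 pin_memory=True, drop_last=True,
                 collate_fn=deeplab_dataset_collate)

for images, pngs, seg_labels in gen:
    # images: (B, 3, H, W) float tensor; pngs: (B, H, W) long tensor;
    # seg_labels: (B, H, W, num_classes + 1) one-hot float tensor
    break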
import os import datetime import numpy as np import torch import torch.backends.cudnn as cudnn import torch.distributed as dist import torch.optim as optim from torch.utils.data import DataLoader from nets.deeplabv3_plus import DeepLab from nets.deeplabv3_training import (get_lr_scheduler, set_optimizer_lr, weights_init) from utils.callbacks import LossHistory, EvalCallback from utils.dataloader import DeeplabDataset, deeplab_dataset_collate from utils.utils import download_weights, show_config from utils.utils_fit import fit_one_epoch from torch.cuda.amp import GradScaler as GradScaler
12,537
Freeze_batch_size = 8 #------------------------------------------------------------------# # 解冻阶段训练参数 # 此时模型的主干不被冻结了,特征提取网络会发生改变 # 占用的显存较大,网络所有的参数都会发生改变 # UnFreeze_Epoch 模型总共训练的epoch # Unfreeze_batch_size 模型在解冻后的batch_size #------------------------------------------------------------------# UnFreeze_Epoch = 20 Unfreeze_batch_size = 4 #------------------------------------------------------------------# # Freeze_Train 是否进行冻结训练 # 默认先冻结主干训练后解冻训练。 #------------------------------------------------------------------# Freeze_Train = True #------------------------------------------------------------------# # 其它训练参数:学习率、优化器、学习率下降有关 #------------------------------------------------------------------# #------------------------------------------------------------------# # Init_lr 模型的最大学习率 # 当使用Adam优化器时建议设置 Init_lr=5e-4 # 当使用SGD优化器时建议设置 Init_lr=7e-3 # Min_lr 模型的最小学习率,默认为最大学习率的0.01 #------------------------------------------------------------------# Init_lr = 7e-4 Min_lr = Init_lr * 0.01 #------------------------------------------------------------------# # optimizer_type 使用到的优化器种类,可选的有adam、sgd # 当使用Adam优化器时建议设置 Init_lr=5e-4 # 当使用SGD优化器时建议设置 Init_lr=7e-3 # momentum 优化器内部使用到的momentum参数 # weight_decay 权值衰减,可防止过拟合 # adam会导致weight_decay错误,使用adam时建议设置为0。 #------------------------------------------------------------------# optimizer_type = "sgd" momentum = 0.9 weight_decay = 1e-4 #1e-4 sgd是 #------------------------------------------------------------------# # lr_decay_type 使用到的学习率下降方式,可选的有'step'、'cos' #------------------------------------------------------------------# lr_decay_type = 'cos' #------------------------------------------------------------------# # save_period 多少个epoch保存一次权值 #------------------------------------------------------------------# save_period = 800 #------------------------------------------------------------------# # save_dir 权值与日志文件保存的文件夹 #------------------------------------------------------------------# save_dir = 'logs' #------------------------------------------------------------------# # eval_flag 是否在训练时进行评估,评估对象为验证集 # eval_period 代表多少个epoch评估一次,不建议频繁的评估 # 评估需要消耗较多的时间,频繁评估会导致训练非常慢 # 此处获得的mAP会与get_map.py获得的会有所不同,原因有二: # (一)此处获得的mAP为验证集的mAP。 # (二)此处设置评估参数较为保守,目的是加快评估速度。 #------------------------------------------------------------------# eval_flag = True eval_period = 400 #7.13开始跑 #10点40 #------------------------------------------------------------------# # VOCdevkit_path 数据集路径 #------------------------------------------------------------------# VOCdevkit_path = 'VOCdevkit' #------------------------------------------------------------------# # 建议选项: # 种类少(几类)时,设置为True # 种类多(十几类)时,如果batch_size比较大(10以上),那么设置为True # 种类多(十几类)时,如果batch_size比较小(10以下),那么设置为False #------------------------------------------------------------------# dice_loss = False #------------------------------------------------------------------# # 是否使用focal loss来防止正负样本不平衡 #------------------------------------------------------------------# focal_loss = False #------------------------------------------------------------------# # 是否给不同种类赋予不同的损失权值,默认是平衡的。 # 设置的话,注意设置成numpy形式的,长度和num_classes一样。 # 如: # num_classes = 3 # cls_weights = np.array([1, 2, 3], np.float32) #------------------------------------------------------------------# cls_weights = np.ones([num_classes], np.float32) #------------------------------------------------------------------# # num_workers 用于设置是否使用多线程读取数据,1代表关闭多线程 # 开启后会加快数据读取速度,但是会占用更多内存 # keras里开启多线程有些时候速度反而慢了许多 # 在IO为瓶颈的时候再开启多线程,即GPU运算速度远大于读取图片的速度。 
#------------------------------------------------------------------# num_workers = 4 #------------------------------------------------------# # 设置用到的显卡 #------------------------------------------------------# ngpus_per_node = torch.cuda.device_count() if distributed: dist.init_process_group(backend="nccl") local_rank = int(os.environ["LOCAL_RANK"]) rank = int(os.environ["RANK"]) device = torch.device("cuda", local_rank) if local_rank == 0: print(f"[{os.getpid()}] (rank = {rank}, local_rank = {local_rank}) training...") print("Gpu Device Count : ", ngpus_per_node) else: device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') local_rank = 0 #----------------------------------------------------# # 下载预训练权重 #----------------------------------------------------# if pretrained: if distributed: if local_rank == 0: download_weights(backbone) dist.barrier() else: download_weights(backbone)
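To make the learning-rate guidance in the comments above concrete, here is a small illustrative helper (the function name is hypothetical and not part of the repository). Note that the configuration above sets Init_lr = 7e-4 with SGD, which differs from the 7e-3 the comments recommend:

def suggested_lr_settings(optimizer_type):
    # Values follow the guidance in the comments above:
    # Adam -> Init_lr = 5e-4, weight_decay = 0; SGD -> Init_lr = 7e-3, weight_decay = 1e-4
    if optimizer_type == "adam":
        return 5e-4, 0.0
    if optimizer_type == "sgd":
        return 7e-3, 1e-4
    raise ValueError("optimizer_type must be 'adam' or 'sgd'")

Init_lr, weight_decay = suggested_lr_settings("sgd")
Min_lr = Init_lr * 0.01   # minimum learning rate is 1% of the maximum, as stated above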
''' 训练自己的语义分割模型一定需要注意以下几点: 1、训练前仔细检查自己的格式是否满足要求,该库要求数据集格式为VOC格式,需要准备好的内容有输入图片和标签 输入图片为.jpg图片,无需固定大小,传入训练前会自动进行resize。 灰度图会自动转成RGB图片进行训练,无需自己修改。 输入图片如果后缀非jpg,需要自己批量转成jpg后再开始训练。 标签为png图片,无需固定大小,传入训练前会自动进行resize。 由于许多同学的数据集是网络上下载的,标签格式并不符合,需要再度处理。一定要注意!标签的每个像素点的值就是这个像素点所属的种类。 网上常见的数据集总共对输入图片分两类,背景的像素点值为0,目标的像素点值为255。这样的数据集可以正常运行但是预测是没有效果的! 需要改成,背景的像素点值为0,目标的像素点值为1。 如果格式有误,参考:https://github.com/bubbliiiing/segmentation-format-fix 2、损失值的大小用于判断是否收敛,比较重要的是有收敛的趋势,即验证集损失不断下降,如果验证集损失基本上不改变的话,模型基本上就收敛了。 损失值的具体大小并没有什么意义,大和小只在于损失的计算方式,并不是接近于0才好。如果想要让损失好看点,可以直接到对应的损失函数里面除上10000。 训练过程中的损失值会保存在logs文件夹下的loss_%Y_%m_%d_%H_%M_%S文件夹中 3、训练好的权值文件保存在logs文件夹中,每个训练世代(Epoch)包含若干训练步长(Step),每个训练步长(Step)进行一次梯度下降。 如果只是训练了几个Step是不会保存的,Epoch和Step的概念要捋清楚一下。 ''' if __name__ == "__main__": #---------------------------------# # Cuda 是否使用Cuda # 没有GPU可以设置成False #---------------------------------# Cuda = True #---------------------------------------------------------------------# # distributed 用于指定是否使用单机多卡分布式运行 # 终端指令仅支持Ubuntu。CUDA_VISIBLE_DEVICES用于在Ubuntu下指定显卡。 # Windows系统下默认使用DP模式调用所有显卡,不支持DDP。 # DP模式: # 设置 distributed = False # 在终端中输入 CUDA_VISIBLE_DEVICES=0,1 python train.py # DDP模式: # 设置 distributed = True # 在终端中输入 CUDA_VISIBLE_DEVICES=0,1 python -m torch.distributed.launch --nproc_per_node=2 train.py #---------------------------------------------------------------------# distributed = False #---------------------------------------------------------------------# # sync_bn 是否使用sync_bn,DDP模式多卡可用 #---------------------------------------------------------------------# sync_bn = False #---------------------------------------------------------------------# # fp16 是否使用混合精度训练 # 可减少约一半的显存、需要pytorch1.7.1以上 #---------------------------------------------------------------------# fp16 = False #-----------------------------------------------------# # num_classes 训练自己的数据集必须要修改的 # 自己需要的分类个数+1,如2+1 #-----------------------------------------------------# num_classes = 3 #---------------------------------# # 所使用的的主干网络: # mobilenet # xception #---------------------------------# backbone = "mobilenet" #----------------------------------------------------------------------------------------------------------------------------# # pretrained 是否使用主干网络的预训练权重,此处使用的是主干的权重,因此是在模型构建的时候进行加载的。 # 如果设置了model_path,则主干的权值无需加载,pretrained的值无意义。 # 如果不设置model_path,pretrained = True,此时仅加载主干开始训练。 # 如果不设置model_path,pretrained = False,Freeze_Train = Fasle,此时从0开始训练,且没有冻结主干的过程。 #----------------------------------------------------------------------------------------------------------------------------# pretrained = False #----------------------------------------------------------------------------------------------------------------------------# # 权值文件的下载请看README,可以通过网盘下载。模型的 预训练权重 对不同数据集是通用的,因为特征是通用的。 # 模型的 预训练权重 比较重要的部分是 主干特征提取网络的权值部分,用于进行特征提取。 # 预训练权重对于99%的情况都必须要用,不用的话主干部分的权值太过随机,特征提取效果不明显,网络训练的结果也不会好 # 训练自己的数据集时提示维度不匹配正常,预测的东西都不一样了自然维度不匹配 # # 如果训练过程中存在中断训练的操作,可以将model_path设置成logs文件夹下的权值文件,将已经训练了一部分的权值再次载入。 # 同时修改下方的 冻结阶段 或者 解冻阶段 的参数,来保证模型epoch的连续性。 # # 当model_path = ''的时候不加载整个模型的权值。 # # 此处使用的是整个模型的权重,因此是在train.py进行加载的,pretrain不影响此处的权值加载。 # 如果想要让模型从主干的预训练权值开始训练,则设置model_path = '',pretrain = True,此时仅加载主干。 # 如果想要让模型从0开始训练,则设置model_path = '',pretrain = Fasle,Freeze_Train = Fasle,此时从0开始训练,且没有冻结主干的过程。 # # 一般来讲,网络从0开始的训练效果会很差,因为权值太过随机,特征提取效果不明显,因此非常、非常、非常不建议大家从0开始训练! 
# 如果一定要从0开始,可以了解imagenet数据集,首先训练分类模型,获得网络的主干部分权值,分类模型的 主干部分 和该模型通用,基于此进行训练。 #----------------------------------------------------------------------------------------------------------------------------# model_path = "model_data/deeplab_mobilenetv2.pth" #---------------------------------------------------------# # downsample_factor 下采样的倍数8、16 # 8下采样的倍数较小、理论上效果更好。 # 但也要求更大的显存 #---------------------------------------------------------# downsample_factor = 8 #------------------------------# # 输入图片的大小 #------------------------------# input_shape = [512, 512] #----------------------------------------------------------------------------------------------------------------------------# # 训练分为两个阶段,分别是冻结阶段和解冻阶段。设置冻结阶段是为了满足机器性能不足的同学的训练需求。 # 冻结训练需要的显存较小,显卡非常差的情况下,可设置Freeze_Epoch等于UnFreeze_Epoch,此时仅仅进行冻结训练。 # # 在此提供若干参数设置建议,各位训练者根据自己的需求进行灵活调整: # (一)从整个模型的预训练权重开始训练: # Adam: # Init_Epoch = 0,Freeze_Epoch = 50,UnFreeze_Epoch = 100,Freeze_Train = True,optimizer_type = 'adam',Init_lr = 5e-4,weight_decay = 0。(冻结) # Init_Epoch = 0,UnFreeze_Epoch = 100,Freeze_Train = False,optimizer_type = 'adam',Init_lr = 5e-4,weight_decay = 0。(不冻结) # SGD: # Init_Epoch = 0,Freeze_Epoch = 50,UnFreeze_Epoch = 100,Freeze_Train = True,optimizer_type = 'sgd',Init_lr = 7e-3,weight_decay = 1e-4。(冻结) # Init_Epoch = 0,UnFreeze_Epoch = 100,Freeze_Train = False,optimizer_type = 'sgd',Init_lr = 7e-3,weight_decay = 1e-4。(不冻结) # 其中:UnFreeze_Epoch可以在100-300之间调整。 # (二)从主干网络的预训练权重开始训练: # Adam: # Init_Epoch = 0,Freeze_Epoch = 50,UnFreeze_Epoch = 100,Freeze_Train = True,optimizer_type = 'adam',Init_lr = 5e-4,weight_decay = 0。(冻结) # Init_Epoch = 0,UnFreeze_Epoch = 100,Freeze_Train = False,optimizer_type = 'adam',Init_lr = 5e-4,weight_decay = 0。(不冻结) # SGD: # Init_Epoch = 0,Freeze_Epoch = 50,UnFreeze_Epoch = 120,Freeze_Train = True,optimizer_type = 'sgd',Init_lr = 7e-3,weight_decay = 1e-4。(冻结) # Init_Epoch = 0,UnFreeze_Epoch = 120,Freeze_Train = False,optimizer_type = 'sgd',Init_lr = 7e-3,weight_decay = 1e-4。(不冻结) # 其中:由于从主干网络的预训练权重开始训练,主干的权值不一定适合语义分割,需要更多的训练跳出局部最优解。 # UnFreeze_Epoch可以在120-300之间调整。 # Adam相较于SGD收敛的快一些。因此UnFreeze_Epoch理论上可以小一点,但依然推荐更多的Epoch。 # (三)batch_size的设置: # 在显卡能够接受的范围内,以大为好。显存不足与数据集大小无关,提示显存不足(OOM或者CUDA out of memory)请调小batch_size。 # 受到BatchNorm层影响,batch_size最小为2,不能为1。 # 正常情况下Freeze_batch_size建议为Unfreeze_batch_size的1-2倍。不建议设置的差距过大,因为关系到学习率的自动调整。 #----------------------------------------------------------------------------------------------------------------------------# #------------------------------------------------------------------# # 冻结阶段训练参数 # 此时模型的主干被冻结了,特征提取网络不发生改变 # 占用的显存较小,仅对网络进行微调 # Init_Epoch 模型当前开始的训练世代,其值可以大于Freeze_Epoch,如设置: # Init_Epoch = 60、Freeze_Epoch = 50、UnFreeze_Epoch = 100 # 会跳过冻结阶段,直接从60代开始,并调整对应的学习率。 # (断点续练时使用) # Freeze_Epoch 模型冻结训练的Freeze_Epoch # (当Freeze_Train=False时失效) # Freeze_batch_size 模型冻结训练的batch_size # (当Freeze_Train=False时失效) #------------------------------------------------------------------# Init_Epoch = 0 Freeze_Epoch = 10 Freeze_batch_size = 8 #------------------------------------------------------------------# # 解冻阶段训练参数 # 此时模型的主干不被冻结了,特征提取网络会发生改变 # 占用的显存较大,网络所有的参数都会发生改变 # UnFreeze_Epoch 模型总共训练的epoch # Unfreeze_batch_size 模型在解冻后的batch_size #------------------------------------------------------------------# UnFreeze_Epoch = 20 Unfreeze_batch_size = 4 #------------------------------------------------------------------# # Freeze_Train 是否进行冻结训练 # 默认先冻结主干训练后解冻训练。 #------------------------------------------------------------------# Freeze_Train = True 
#------------------------------------------------------------------# # 其它训练参数:学习率、优化器、学习率下降有关 #------------------------------------------------------------------# #------------------------------------------------------------------# # Init_lr 模型的最大学习率 # 当使用Adam优化器时建议设置 Init_lr=5e-4 # 当使用SGD优化器时建议设置 Init_lr=7e-3 # Min_lr 模型的最小学习率,默认为最大学习率的0.01 #------------------------------------------------------------------# Init_lr = 7e-4 Min_lr = Init_lr * 0.01 #------------------------------------------------------------------# # optimizer_type 使用到的优化器种类,可选的有adam、sgd # 当使用Adam优化器时建议设置 Init_lr=5e-4 # 当使用SGD优化器时建议设置 Init_lr=7e-3 # momentum 优化器内部使用到的momentum参数 # weight_decay 权值衰减,可防止过拟合 # adam会导致weight_decay错误,使用adam时建议设置为0。 #------------------------------------------------------------------# optimizer_type = "sgd" momentum = 0.9 weight_decay = 1e-4 #1e-4 sgd是 #------------------------------------------------------------------# # lr_decay_type 使用到的学习率下降方式,可选的有'step'、'cos' #------------------------------------------------------------------# lr_decay_type = 'cos' #------------------------------------------------------------------# # save_period 多少个epoch保存一次权值 #------------------------------------------------------------------# save_period = 800 #------------------------------------------------------------------# # save_dir 权值与日志文件保存的文件夹 #------------------------------------------------------------------# save_dir = 'logs' #------------------------------------------------------------------# # eval_flag 是否在训练时进行评估,评估对象为验证集 # eval_period 代表多少个epoch评估一次,不建议频繁的评估 # 评估需要消耗较多的时间,频繁评估会导致训练非常慢 # 此处获得的mAP会与get_map.py获得的会有所不同,原因有二: # (一)此处获得的mAP为验证集的mAP。 # (二)此处设置评估参数较为保守,目的是加快评估速度。 #------------------------------------------------------------------# eval_flag = True eval_period = 400 #7.13开始跑 #10点40 #------------------------------------------------------------------# # VOCdevkit_path 数据集路径 #------------------------------------------------------------------# VOCdevkit_path = 'VOCdevkit' #------------------------------------------------------------------# # 建议选项: # 种类少(几类)时,设置为True # 种类多(十几类)时,如果batch_size比较大(10以上),那么设置为True # 种类多(十几类)时,如果batch_size比较小(10以下),那么设置为False #------------------------------------------------------------------# dice_loss = False #------------------------------------------------------------------# # 是否使用focal loss来防止正负样本不平衡 #------------------------------------------------------------------# focal_loss = False #------------------------------------------------------------------# # 是否给不同种类赋予不同的损失权值,默认是平衡的。 # 设置的话,注意设置成numpy形式的,长度和num_classes一样。 # 如: # num_classes = 3 # cls_weights = np.array([1, 2, 3], np.float32) #------------------------------------------------------------------# cls_weights = np.ones([num_classes], np.float32) #------------------------------------------------------------------# # num_workers 用于设置是否使用多线程读取数据,1代表关闭多线程 # 开启后会加快数据读取速度,但是会占用更多内存 # keras里开启多线程有些时候速度反而慢了许多 # 在IO为瓶颈的时候再开启多线程,即GPU运算速度远大于读取图片的速度。 #------------------------------------------------------------------# num_workers = 4 #------------------------------------------------------# # 设置用到的显卡 #------------------------------------------------------# ngpus_per_node = torch.cuda.device_count() if distributed: dist.init_process_group(backend="nccl") local_rank = int(os.environ["LOCAL_RANK"]) rank = int(os.environ["RANK"]) device = torch.device("cuda", local_rank) if local_rank == 0: print(f"[{os.getpid()}] (rank = {rank}, local_rank = {local_rank}) training...") print("Gpu Device Count : ", ngpus_per_node) else: device = 
torch.device('cuda' if torch.cuda.is_available() else 'cpu') local_rank = 0 #----------------------------------------------------# # 下载预训练权重 #----------------------------------------------------# if pretrained: if distributed: if local_rank == 0: download_weights(backbone) dist.barrier() else: download_weights(backbone)
model = DeepLab(num_classes=num_classes, backbone=backbone, downsample_factor=downsample_factor, pretrained=pretrained)
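The line above constructs the model; the checkpoint-loading step the comments describe (resuming from a model_path while tolerating dimension mismatches) is not included in this row. A generic, hedged sketch of that step is shown below; model, model_path and device are the names used in the script above, and the shape-filtering logic is an assumption, not the repository's own code:

import torch

if model_path != "":
    pretrained_dict = torch.load(model_path, map_location=device)
    model_dict = model.state_dict()
    # keep only weights whose name and shape match the freshly built model
    load_dict = {k: v for k, v in pretrained_dict.items()
                 if k in model_dict and model_dict[k].shape == v.shape}
    model_dict.update(load_dict)
    model.load_state_dict(model_dict)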
0
2023-11-17 13:25:28+00:00
16k
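Before the next row, a short usage sketch for the EvalCallback quoted earlier (file paths, split file and period are assumptions and a prepared VOC-style dataset plus an existing logs directory are assumed; the constructor, DeepLab arguments and on_epoch_end call follow the snippets above):

import torch
from nets.deeplabv3_plus import DeepLab
from utils.callbacks import EvalCallback

# Assumed validation split listing image ids, one per line
with open("VOCdevkit/VOC2007/ImageSets/Segmentation/val.txt") as f:
    val_lines = f.readlines()

net = DeepLab(num_classes=3, backbone="mobilenet", downsample_factor=8, pretrained=False)
eval_callback = EvalCallback(net, [512, 512], 3, val_lines, "VOCdevkit", "logs",
                             cuda=torch.cuda.is_available(), eval_flag=True, period=1)

# At the end of each training epoch (inside the loop), one would call:
# eval_callback.on_epoch_end(epoch + 1, model_train)  # writes epoch_miou.txt / epoch_miou.png under logs/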
fg320/DEASC
examples/12D_5x5_farm_dyn_tuning_wso_grouping_CI.py
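The context entries that follow quote the DEASC classes this example builds on. For orientation, a minimal wake-steering run with those classes might look like the sketch below; the FLORIS input file name, layout spacing, inflow values and the choice of optimization variables are all assumptions, only the WfModel and WSOpt signatures follow the quoted snippets:

import numpy as np
from deasc.wf_model import WfModel
from deasc.wake_steering import WSOpt

# Assumed FLORIS input file and path
wf_model = WfModel("input_file.json", "./inputs/")
wf_model.set_aligned_layout(5, 5, 7, 5)          # 5x5 aligned farm, 7D x 5D spacing (assumed)

inflow = (list(np.zeros(wf_model.n_turbs)),      # initial yaw angles (deg)
          270.0,                                  # wind direction (deg)
          8.0,                                    # wind speed (m/s)
          0.06,                                   # turbulence intensity
          0.12)                                   # shear exponent

wso = WSOpt(wf_model,
            inflow,
            variables=[1, 2, 3, 4, 5],            # turbines to optimize (assumed subset)
            var_bounds=(-25, 25),
            var_initial=[0.0, 0.0, 0.0, 0.0, 0.0],
            opt_method="SLSQP")

opt_yaw_vars, opt_yaw_all = wso.optimize_yaw()
iter_details, eval_details = wso.get_optimization_details()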
[ { "identifier": "WfModel", "path": "deasc/wf_model.py", "snippet": "class WfModel:\n \"\"\"\n Class for wind farm modelling (Interface setup but not limited to FLORIS\n framework).\n \"\"\"\n\n def __init__(self, input_file, path):\n \"\"\"\n Initialise wind farm object by pointing towards an input file.\n (FLORIS interface object).\n\n Args\n ----\n input file:(FLORIS .json input file).\n \"\"\"\n # Read and initialize input file\n self.input_file = input_file\n self.interface = floris_input_handler(self.input_file, path)\n\n # Assign wind farm model proporties\n self.D, self.H_hub, self.n_turbs = floris_properties(self)\n\n def set_aligned_layout(self, n_row, n_col, spac_x, spac_y, coordinates=False):\n \"\"\"\n Modify farm layout in aligned wind turbines with constant spacing,\n differing only from rows to columns. Flow field is also reinitialized.\n\n Args\n ----\n n_row: (float) number of turbine rows\n n_col: (float) number of turbine columns\n spac_x: (float) WT diam normalized turbines distance in x direction\n spac_y: (float) WT diam normalized turbines distance in y direction\n coordinates: (bool, opt) False if no coordinates wanted.\n Default set to False.\n\n Returns\n -------\n if coordinates is False:\n None\n if coordinates is True:\n x-coordinates: (numpy array) turbines x-coordinates\n y-coordinates: (numpy array) turbines y-coordinates\n \"\"\"\n # Input type check\n if not all(isinstance(i, int) for i in [n_row, n_col]) or \\\n not all(isinstance(j, (int, float)) for j in [spac_x, spac_y]):\n err_msg = \"Incorrect input value types\"\n raise ValueError(err_msg)\n\n # Calculate new coordinate farm layout\n layout_x = []\n layout_y = []\n for i in range(int(n_row)):\n for j in range(int(n_col)):\n layout_x.append(i * spac_x * self.D)\n layout_y.append(j * spac_y * self.D)\n\n # Reinitialize wind farm object\n floris_reinitialise_layout(self, layout_x, layout_y)\n\n if coordinates:\n return (np.array(layout_x), np.array(layout_y))\n else:\n return None\n\n def set_HR_layout(self, coordinates=False):\n \"\"\"\n Set Horns Rev wind farm layout to wind farm object and\n returns turbines' x and y coordinates if coordinates=True.\n\n Args\n ----\n coordinates: (bool, opt) False if no coordinates wanted.\n Default set to False.\n\n Returns\n -------\n if coordinates is False:\n None\n if coordinates is True:\n x-coordinates: (numpy array) turbines x-coordinates\n y-coordinates: (numpy array) turbines y-coordinates\n \"\"\"\n # Vestas V80 2 MW diameter check\n if self.D != 80:\n warning = \"Rotor diameter not from the Vestas V80 2 MW turbine\"\n warnings.warn(warning, UserWarning)\n\n n_rows = 10\n n_cols = 8\n spac_x = 7\n spac_y = 7\n angle = 6\n layout_x = []\n layout_y = []\n for i in range(int(n_rows)):\n for j in range(int(n_cols)):\n layout_x.append((i * spac_x * self.D) -\n (np.sin(np.radians(angle)) * j * spac_y * self.D))\n layout_y.append(j * spac_y * self.D * np.cos(np.radians(angle)))\n\n # Reinitialize wind farm object\n floris_reinitialise_layout(self, layout_x, layout_y)\n\n if coordinates:\n return (np.array(layout_x), np.array(layout_y))\n else:\n return None\n\n def farm_eval(self, yaw=None, ws=None, wd=None, ti=None, shear=None):\n \"\"\"\n Calculate farm flow field for given wind farm layout and input conditions.\n Return main outputs, such as yaw angles, turbines power, farm power, etc.\n\n Args\n ----\n yaw: (list, optional) turbines yaw angles (deg). Default to None.\n ws: (float, optional) input wind speeds (m/s). 
Default to None.\n wd: (float, optional) input wind directions (deg). Default to None.\n ti: (float, optional) input turbulence intensity. Default to None.\n shear: (float, optional) shear exponent. Default to None.\n\n Returns\n -------\n wf_pow: (float) WF power (MWatts).\n wt_pow: (np.array) WTs power (MWatts).\n wt_ti: (list) WTs turbulence intensity.\n wt_yaw: (np.array) WTs yaw angles (deg).\n \"\"\"\n # Main wind farm calculation\n wf_pow, wt_pow, wt_ti, wt_yaw, _ = floris_farm_eval(self,\n yaw,\n ws,\n wd,\n ti,\n shear)\n\n return (wf_pow, wt_pow, wt_ti, wt_yaw)\n\n def pow_yaw_sweep_1var(self, layout, var_info):\n \"\"\"\n Return wind farm power for a single yaw variable, either a\n single turbine or a single row of turbines. Sweep by row not possible\n for not aligned \"custom\" layouts.\n\n Args\n ----\n layout: (tuple)\n row: (integer) number of farm rows\n cols: (integer) number of farm columns\n or string \"custom\"\n var_info: (tuple)\n var_type: (string) \"T\" for turbine,\n \"R\" for row (not for custom layouts)\n var: (integer) turbine or row number\n var_value: (list of floats) variable values\n\n Returns\n -------\n obj_out: tuple\n obj: (list) objective values\n obj_func: (string) objective function\n var_info: (tuple) see input\n model: (string) model name\n \"\"\"\n # Extract inputs and check inputs\n var_type, var, var_value = var_info\n if layout != \"custom\":\n rows, cols = layout\n if var_type == 'R' and layout == \"custom\":\n err_msg = \"Row not allowed for custom layouts\"\n raise ValueError(err_msg)\n if var_type == 'R' and var > rows:\n err_msg = \"Row specified not in farm\"\n raise ValueError(err_msg)\n if var_type == 'T' and var > self.n_turbs:\n err_msg = \"Turbine specified not in farm\"\n raise ValueError(err_msg)\n\n # Calculations\n yaw_angles = np.array(floris_current_yaw(self))\n wf_pow = []\n\n for yaw_change in var_value:\n if layout != \"custom\":\n rows, cols = layout\n if var_type == 'T':\n yaw_angles[(var-1)] = yaw_change\n elif var_type == 'R':\n idx_1 = var*cols\n idx_0 = idx_1-cols\n yaw_angles[idx_0:idx_1] = yaw_change\n else:\n err_msg = \"var_type either 'T' or 'R'\"\n raise ValueError(err_msg)\n\n wf_pow_single, _, _, _ = self.farm_eval(yaw=yaw_angles)\n wf_pow.append(wf_pow_single)\n\n obj_out = (wf_pow, 'Farm Power')\n var_info = (var_type, var, var_value)\n print(\"Function exploration complete\")\n\n return obj_out, var_info" }, { "identifier": "WSOpt", "path": "deasc/wake_steering.py", "snippet": "class WSOpt:\n \"\"\"\n Class to perform wake steering optimization with a WfModel object, given an a-priori\n specified wind farm layout and specified atmopheric conditions. Optimization can have\n all/some turbines as variables, or rows for wind farms with equal columns. 
Optimizers\n available are the local SLSQP, where linear constraints can be added, and the global\n optimizer TuRBO.\n \"\"\"\n\n def __init__(self,\n wf_model,\n inflow,\n variables,\n var_bounds,\n var_initial,\n opt_method=\"SLSQP\",\n opt_options=None,\n obj_function=\"Farm Power\",\n constraints=(None, None, None),\n by_row=(False, None, None),\n tuning_dynamic=False\n ):\n \"\"\"\n Args\n ----\n wf_model: (WfModel)\n WfModel to perform wake steering optimization.\n inflow: (list) Inflow conditions for wake steering optimization.\n yaw_initial: (list) wind farm yaw angles (deg).\n (string) 'random' for random intial wind farm yaw angles.\n wd: (float) input wind directions (deg).\n ws: (float) input wind speeds (m/s).\n ti: (float) input turbulence intensity.\n shear: (float) shear exponent.\n variables: (list)\n List of turbines (or rows) to optimize. Naming convention starts from 1.\n var_bounds: (tuple)\n low_bound: (float) variable (yaw angle) lower bound.\n upp_bound: (float) variable (yaw angle) upper bound.\n var_initial:\n SLSQP: (list) list of initial variable values for each variable.\n (string) 'random' for random initial variable values.\n TURBO_1: (list of lists) list of n_init variable values lists\n (see TURBO_1 options).\n (string) 'LHS' latin hypercube sampling.\n TURBO_M: (string) 'LHS' latin hypercube sampling.\n opt_method: (string, optional) optimization method.\n 'SLSQP', 'TURBO_1 and 'TURBO_M' available.\n Default set to 'SLSQP'.\n opt_options: (dict , optional) optimization method options dictionary.\n Default set to None.\n opt_function: (string , optional) objective function. 'Farm Power' available\n Default set to 'Farm Power'.\n constraints: (tuple) Linear constraints definition. Limited to SLSQP.\n A: (matrix) linear constraint matrix.\n Default set to None.\n low_bound_constr: (float) lower non-normalized contraint bound.\n Default set to None.\n upp_bnd_constr: (float) upper non-normalized contraint bound.\n Default set to None.\n by_row : (tuple, optional) Optimization by row, requires all farm columns to have\n the same amount of rows.\n by_row_bool: (bool) True if optimization variables are wind farm rows,\n False if wind farm turbines. Default set to False.\n rows:: (int) wind farm rows. Default set to None.\n cols:: (int) wind farm columns. Default set to None.\n tuning_dynamic : (bool, optional)\n If True, include dynamic parameter tuning. See tuning_dynamic_initialize\n method. 
Default to False.\n \"\"\"\n # Opt Methods - Opt Options - Optimizers - Opt Functions\n self.opt_method_list = [\"SLSQP\", \"TURBO_1\", \"TURBO_M\"]\n self.opt_options_dict = {\"SLSQP\": {'maxiter': 100,\n 'disp': True,\n 'iprint': 2,\n 'ftol': 1e-6,\n 'eps': 0.01},\n \"TURBO_1\": {\"n_init\": len(variables)*2,\n \"max_evals\": 500,\n \"batch_size\": 1, # 1 = Serial\n \"verbose\": True,\n \"use_ard\": True,\n \"max_cholesky_size\": 2000,\n \"n_training_steps\": 50,\n \"min_cuda\": 1024,\n \"device\": \"cpu\",\n \"dtype\": \"float64\"},\n \"TURBO_M\": {\"n_init\": len(variables)*2,\n \"max_evals\": 500,\n \"n_trust_regions\": 2,\n \"batch_size\": 1, # 1 = Serial\n \"verbose\": True,\n \"use_ard\": True,\n \"max_cholesky_size\": 2000,\n \"n_training_steps\": 50,\n \"min_cuda\": 1024,\n \"device\": \"cpu\",\n \"dtype\": \"float64\"}}\n self.optimizer_dict = {'SLSQP': self._optimizer_scipy,\n 'TURBO_1': self._optimizer_turbo_1,\n 'TURBO_M': self._optimizer_turbo_m}\n self.obj_function_dict = {'Farm Power': self._obj_function_power}\n\n # Optimization methods and optimizer\n self.opt_method = opt_method\n self._opt_method_settler()\n self.optimizer = self.optimizer_dict[self.opt_method]\n\n # Optimizer options\n self.opt_options = opt_options\n self._opt_options_settler()\n\n # Optimization function\n self.obj_function_name = obj_function\n self._obj_function_settler()\n\n # Wind farm conditions\n self.wf_model = wf_model\n self.wf_model_dict_original = floris_extract_object_dict(self.wf_model)\n self.yaw_initial, self.wd, self.ws, self.ti, self.shear = inflow\n if not isinstance(self.yaw_initial, (list, np.ndarray)):\n if self.yaw_initial == 'random':\n self.yaw_initial = self._random_yaw_generator(self.wf_model.n_turbs,\n var_bounds)\n self._yaw_initial_input_handler()\n self.yaw_initial = np.array([float(item) for item in self.yaw_initial])\n\n # Optimization per wind turbine or per wind farm row\n self.by_row_bool = by_row[0]\n if self.by_row_bool:\n self.rows = by_row[1]\n self.cols = by_row[2]\n self._by_row_input_handler()\n\n # Variable bounds\n self.var_bounds = var_bounds\n self.low_bound, self.upp_bound = self.var_bounds\n self.low_bound_norm = norm(self.low_bound, self.low_bound, self.upp_bound)\n self.upp_bound_norm = norm(self.upp_bound, self.low_bound, self.upp_bound)\n self.var_bounds_norm = (self.low_bound_norm, self.upp_bound_norm)\n tmp = [self.var_bounds_norm for i in range(len(variables))]\n self.var_bounds_norm_list = tmp\n tmp = np.array([self.low_bound_norm for i in range(len(variables))])\n self.low_bound_norm_list = tmp\n tmp = np.array([self.upp_bound_norm for i in range(len(variables))])\n self.upp_bound_norm_list = tmp\n\n # Constraints\n self.A = constraints[0]\n self.low_bound_constr = constraints[1]\n self.upp_bound_constr = constraints[2]\n if self.A is not None:\n self._constraints_input_handler()\n self.low_bound_constr_norm = norm(self.low_bound_constr,\n self.low_bound,\n self.upp_bound)\n self.upp_bound_constr_norm = norm(self.upp_bound_constr,\n self.low_bound,\n self.upp_bound)\n\n # Yaw variables\n self.variables = variables\n self.var_initial = var_initial\n self._variables_input_handler()\n if not isinstance(self.var_initial, (list, np.ndarray)):\n if self.opt_method == 'SLSQP' and self.var_initial == 'random':\n self.var_initial = self._random_yaw_generator(len(self.variables),\n self.var_bounds)\n self._var_initial_input_handler()\n self.var_initial_norm = self._var_initial_norm()\n\n # Dynamic tuning\n self.tuning_dyn_bool = tuning_dynamic\n 
self._tuning_dyn_bool_check()\n self.tuning_dyn_initialization = False\n\n self.opt_run = False\n\n def tuning_dyn_initialize(self, tuning_dyn_obj_list):\n \"\"\"\n Assign list of tuning dynamic objects TuningDyn to the WSOpt object.\n\n Args\n ----\n tuning_dyn_object: (list of TuningDyn objects)\n \"\"\"\n self.tuning_dyn_obj_list = tuning_dyn_obj_list\n self._tuning_dyn_init_input_handler()\n for tuning_dyn_obj in self.tuning_dyn_obj_list:\n tuning_dyn_obj.wso_compatibility_check(self)\n self.tuning_dyn_initialization = True\n\n def optimize_yaw(self):\n \"\"\"\n Optimize the yaw angle for the given WSOpt object.\n\n Returns\n -------\n opt_yaw_angles_vars: (ndarray) optimal yaw angles for the optimization variables.\n opt_yaw_angles_all: (ndarray) optimal yaw angles for all.wind farm turbines.\n \"\"\"\n # Tuning dynamic initialization check\n self._tuning_dyn_initialization_check()\n\n # Print optimization info\n self._print_info()\n\n # Wind farm power - no yaw\n self.wf_pow_noyaw = self._get_farm_power_noyaw()\n\n # Optimize\n self._iter_details_setup()\n self.opt_yaw_angles_vars, self.opt_yaw_angles_all = self.optimizer()\n self.opt_run = True\n\n return (self.opt_yaw_angles_vars, self.opt_yaw_angles_all)\n\n def get_optimization_details(self):\n \"\"\"\n Return optimization details: optimizer iterations details and objective function\n evaluations details. The two are identical for TURBO optimizers as an objective\n function evaluation corresponds to an optimizer iteration, different for SLSQP as\n additional objective function evaluations are required to approximate gradients.\n\n Returns\n -------\n iter_details: (tuple) optimizer iterations details.\n iter_yaw_angles: (list) list of yaw angles per optimizer iteration.\n iter_obj_func: (list) list of objective function per optimizer iteration.\n iter_farm_power: (list) list of farm power values per optimizer iteration.\n eval_details: (tuple) objective fucntion evaluations details.\n eval_yaw_angles: (list) list of yaw angles per evaluation.\n eval_obj_func: (list) list of objective function per evaluation.\n eval_farm_power: (list) list of farm power values per evaluation.\n \"\"\"\n iter_details = (self.iter_yaw_angles,\n self.iter_obj_func,\n self.iter_farm_power)\n eval_details = (self.eval_yaw_angles,\n self.eval_obj_func,\n self.eval_farm_power)\n return (iter_details, eval_details)\n\n # %% Private methods\n\n def _opt_method_settler(self):\n if self.opt_method not in self.opt_method_list:\n err_msg = \"Optimization method not recognized\"\n raise Exception(err_msg)\n\n def _opt_options_settler(self):\n if self.opt_options is None:\n self.opt_options = self.opt_options_dict[self.opt_method]\n\n def _obj_function_settler(self):\n if self.obj_function_name in list(self.obj_function_dict.keys()):\n self.obj_function = self.obj_function_dict[self.obj_function_name]\n else:\n err_msg = \"Optimization function not recognized\"\n raise Exception(err_msg)\n\n def _random_yaw_generator(self, yaw_number, yaw_bounds):\n yaw_angles = []\n for i in range(yaw_number):\n x = random.choice(range(yaw_bounds[0], yaw_bounds[1]+1))\n yaw_angles.append(x)\n return yaw_angles\n\n def _yaw_initial_input_handler(self):\n if len(self.yaw_initial) != self.wf_model.n_turbs:\n err_msg = \"Initial yaw angles do not match turbine number\"\n raise Exception(err_msg)\n\n def _by_row_input_handler(self):\n if self.rows*self.cols != self.wf_model.n_turbs:\n err_msg = \"Farm rows and columns provided do not match turbine number\"\n raise 
Exception(err_msg)\n\n def _constraints_input_handler(self):\n if self.opt_method != 'SLSQP':\n err_msg = \"Linear constraints (on top of bounds) limited to SLSQP optimizer\"\n raise Exception(err_msg)\n\n def _variables_input_handler(self):\n if self.by_row_bool:\n for row in self.variables:\n if row > self.rows:\n err_msg = \"Row/s specified not in farm\"\n raise Exception(err_msg)\n if len(self.variables) > self.rows:\n err_msg = \"Too many rows specified\"\n raise Exception(err_msg)\n else:\n for turb in self.variables:\n if turb > self.wf_model.n_turbs:\n err_msg = \"Turbine/s specified not in the farm\"\n raise Exception(err_msg)\n if len(self.variables) > self.wf_model.n_turbs:\n err_msg = \"Too many turbines specified\"\n raise Exception(err_msg)\n if 0 in self.variables:\n err_msg = \"Turbine/row counting convention starts from 1\"\n raise Exception(err_msg)\n\n def _var_initial_input_handler(self):\n if self.opt_method == 'TURBO_1':\n if not isinstance(self.var_initial, (list, np.ndarray)):\n if self.var_initial == 'LHS':\n pass\n elif self.var_initial == 'random':\n err_msg = \"Random initial variables limited to SLSQP optimizer\"\n raise Exception(err_msg)\n else:\n if len(self.var_initial) != self.opt_options[\"n_init\"]:\n err_msg = \"n_init initial variable lists are needed (see TURBO options)\"\n raise Exception(err_msg)\n elif len(self.var_initial[0]) != len(self.variables):\n err_msg = \"var_initial sublists length not equal number of variables\"\n raise Exception(err_msg)\n elif self.opt_method == 'TURBO_M':\n if self.var_initial != 'LHS':\n err_msg = \"TURBO_M optimizer requires LHS as initial sampling\"\n elif self.opt_method == 'SLSQP':\n if not isinstance(self.var_initial, (list, np.ndarray)):\n if self.var_initial == 'LHS':\n err_msg = \"Latin Hypercube Sampling limited to TURBO optimizers\"\n raise Exception(err_msg)\n elif len(self.variables) != len(self.var_initial):\n err_msg = \"var_initial length needs to equal number of variables\"\n raise Exception(err_msg)\n\n def _var_initial_norm(self):\n if self.opt_method == \"SLSQP\":\n self.var_initial = np.array([float(item) for item in self.var_initial])\n var_initial_norm = norm(self.var_initial, self.low_bound, self.upp_bound)\n elif self.var_initial == 'LHS':\n var_initial_norm = None\n else:\n self.var_initial = np.array([np.array(x) for x in self.var_initial])\n var_initial_norm = []\n for x_list in self.var_initial:\n x_list_norm = []\n for x in x_list:\n x_norm = norm(x, self.low_bound, self.upp_bound)\n x_list_norm.append(x_norm)\n var_initial_norm.append(np.array(x_list_norm))\n return np.array(var_initial_norm)\n\n def _get_farm_power_noyaw(self):\n if (self.tuning_dyn_initialization and\n hasattr(self.tuning_dyn_obj_list[0], 'wf_pow_noyaw')):\n wf_pow_noyaw = self.tuning_dyn_obj_list[0].wf_pow_noyaw\n else:\n self.yaw_zero = np.full(shape=self.wf_model.n_turbs, fill_value=0.0)\n self.wf_model = floris_reinitialise_atmosphere(self.wf_model,\n self.ws,\n self.wd,\n self.ti,\n self.shear)\n # Tune parameters\n if self.tuning_dyn_initialization:\n for tuning_dyn_obj in self.tuning_dyn_obj_list:\n self.wf_model = tuning_dyn_obj.tune_parameter(self, self.yaw_zero)\n\n wf_pow_noyaw = floris_calculate_farm_power(self.wf_model, self.yaw_zero)\n return wf_pow_noyaw\n\n def _print_info(self):\n print(\"=====================================================\")\n print(\"Optimizing wake redirection control...\")\n print(\"Optimization method: %s\" % (self.opt_method))\n print(\"Optimization function: %s \\n\" % 
(self.obj_function_name))\n if self.by_row_bool:\n print(\"Rows being optimized: \")\n print(self.variables)\n else:\n print(\"Turbines being optimized: \")\n print(self.variables)\n print(\"Number of variables to optimize = \", len(self.variables))\n print(\"=====================================================\")\n\n def _iter_details_setup(self):\n # Details for each obj function evaluation\n self.eval_yaw_angles = [] # deg\n self.eval_obj_func = []\n self.eval_farm_power = [] # MW\n\n # Details for each optimizer iteration\n self.iter_yaw_angles = [] # deg\n self.iter_obj_func = []\n self.iter_farm_power = [] # MW\n\n def _variables_to_farm_yaw(self, yaw_initial, var_values):\n yaw_angles = copy.deepcopy(yaw_initial)\n if self.by_row_bool:\n for i, row_idx in enumerate(self.variables):\n idx_1 = row_idx*self.cols\n idx_0 = idx_1-self.cols\n yaw_angles[idx_0:idx_1] = var_values[i]\n else:\n for i, turb_idx in enumerate(self.variables):\n yaw_angles[turb_idx-1] = var_values[i]\n return yaw_angles.tolist()\n\n # %% Optimizers\n\n def _optimizer_scipy(self):\n # Call back function for iter details\n def callback_func(xk):\n self.iter_yaw_angles.append(self.eval_yaw_angles[-1])\n self.iter_obj_func.append(self.eval_obj_func[-1])\n self.iter_farm_power.append(self.eval_farm_power[-1])\n # Linearly constrained case\n if self.A is not None:\n self.C = LinearConstraint(self.A,\n self.low_bound_constr_norm,\n self.upp_bound_constr_norm)\n self.residual_plant = minimize(self.obj_function,\n self.var_initial_norm,\n callback=callback_func,\n method=self.opt_method,\n bounds=self.var_bounds_norm_list,\n constraints=(self.C,),\n options=self.opt_options)\n # Unconstrained case\n else:\n self.residual_plant = minimize(self.obj_function,\n self.var_initial_norm,\n callback=callback_func,\n method=self.opt_method,\n bounds=self.var_bounds_norm_list,\n options=self.opt_options)\n # Extract optimal yaw angles for variables\n opt_yaw_angles_vars = unnorm(self.residual_plant.x,\n self.low_bound,\n self.upp_bound)\n # Extract optimal yaw angles for the entire farm\n opt_yaw_angles_all = self._variables_to_farm_yaw(self.yaw_initial,\n opt_yaw_angles_vars)\n\n # Equal yaw groups if dynamic tuning with grouping is in place\n if self.tuning_dyn_initialization:\n if hasattr(self.tuning_dyn_obj_list[0], 'grouping_bool'):\n opt_yaw_angles_all = self.tuning_dyn_obj_list[0].set_yaw_groups(\n opt_yaw_angles_all)\n\n # Use best index because if total iterations reached, optimum not last evaluation\n eval_yaw_angles_lists = [x.tolist() for x in self.eval_yaw_angles]\n index_best = eval_yaw_angles_lists.index(opt_yaw_angles_all)\n opt_yaw_angles_all = np.array(opt_yaw_angles_all)\n self.obj_func_opt = self.eval_obj_func[index_best]\n self.farm_power_opt = self.eval_farm_power[index_best]\n\n # Add initial and last points to iteration details\n self.iter_yaw_angles.insert(0, self.eval_yaw_angles[0])\n self.iter_obj_func.insert(0, self.eval_obj_func[0])\n self.iter_farm_power.insert(0, self.eval_farm_power[0])\n self.iter_yaw_angles.append(self.eval_yaw_angles[-1])\n self.iter_obj_func.append(self.eval_obj_func[-1])\n self.iter_farm_power.append(self.eval_farm_power[-1])\n\n return (opt_yaw_angles_vars, opt_yaw_angles_all)\n\n def _optimizer_turbo_1(self):\n\n # TURBO initial sampling\n if not isinstance(self.var_initial, (list, np.ndarray)):\n if self.var_initial == 'LHS':\n X_init_provided = False\n X_init_same_norm = None\n else:\n X_init_provided = True\n X_init_same_norm = self.var_initial_norm\n\n # TURBO 
optimization\n turbo_1 = Turbo1(f=self.obj_function,\n lb=self.low_bound_norm_list,\n ub=self.upp_bound_norm_list,\n **self.opt_options,\n X_init_provided=X_init_provided,\n X_init_same=X_init_same_norm,\n )\n turbo_1.optimize()\n X = turbo_1.X # Evaluated points\n fX = turbo_1.fX # Observed values\n index_best = np.argmin(fX)\n f_best, x_best = fX[index_best], X[index_best, :]\n\n # Extract optimal yaw angles for variables and the entire farm\n opt_yaw_angles_vars = unnorm(x_best,\n self.low_bound,\n self.upp_bound)\n opt_yaw_angles_all = self._variables_to_farm_yaw(self.yaw_initial,\n opt_yaw_angles_vars)\n\n # Equal yaw groups if dynamic tuning with grouping is in place\n if self.tuning_dyn_initialization:\n if hasattr(self.tuning_dyn_obj_list[0], 'grouping_bool'):\n opt_yaw_angles_all = self.tuning_dyn_obj_list[0].set_yaw_groups(\n opt_yaw_angles_all)\n\n # Update iteration details (same as evaluation details)\n self.iter_yaw_angles = self.eval_yaw_angles\n self.iter_obj_func = self.eval_obj_func\n self.iter_farm_power = self.eval_farm_power\n\n # Use best index because last iteration might not be the optimal one\n self.obj_func_opt = f_best[0]\n self.farm_power_opt = self.iter_farm_power[index_best]\n\n return (opt_yaw_angles_vars, opt_yaw_angles_all)\n\n def _optimizer_turbo_m(self):\n\n # TURBO optimization\n turbo_m = TurboM(f=self.obj_function,\n lb=self.low_bound_norm_list,\n ub=self.upp_bound_norm_list,\n **self.opt_options,\n )\n turbo_m.optimize()\n X = turbo_m.X # Evaluated points\n fX = turbo_m.fX # Observed values\n index_best = np.argmin(fX)\n f_best, x_best = fX[index_best], X[index_best, :]\n\n # Extract optimal yaw angles for variables and the entire farm\n opt_yaw_angles_vars = unnorm(x_best,\n self.low_bound,\n self.upp_bound)\n opt_yaw_angles_all = self._variables_to_farm_yaw(self.yaw_initial,\n opt_yaw_angles_vars)\n\n # Equal yaw groups if dynamic tuning with grouping is in place\n if self.tuning_dyn_initialization:\n if hasattr(self.tuning_dyn_obj_list[0], 'grouping_bool'):\n opt_yaw_angles_all = self.tuning_dyn_obj_list[0].set_yaw_groups(\n opt_yaw_angles_all)\n\n # Update iteration details (same as evaluation details)\n self.iter_yaw_angles = self.eval_yaw_angles\n self.iter_obj_func = self.eval_obj_func\n self.iter_farm_power = self.eval_farm_power\n\n # Use best index because last iteration might not be the optimal one\n self.cost_func_opt = f_best[0]\n self.farm_power_opt = self.iter_farm_power[index_best]\n\n return (opt_yaw_angles_vars, opt_yaw_angles_all)\n\n # %% Objective functions\n\n def _obj_function_power(self, var_norm):\n\n # Extract farm yaw angles\n var_unnorm = unnorm(var_norm, self.low_bound, self.upp_bound)\n yaw_angles = self._variables_to_farm_yaw(self.yaw_initial, var_unnorm)\n yaw_angles = np.array([float(item) for item in yaw_angles])\n\n # Tune parameters dynamically\n if self.tuning_dyn_initialization:\n # Set equal yaw angles in groups\n if hasattr(self.tuning_dyn_obj_list[0], 'grouping_bool'):\n yaw_angles = self.tuning_dyn_obj_list[0].set_yaw_groups(yaw_angles)\n # Tune parameters\n for tuning_dyn_obj in self.tuning_dyn_obj_list:\n self.wf_model = tuning_dyn_obj.tune_parameter(self, yaw_angles)\n\n # Calculate negative of the farm power normalized by power for zero yaw\n self.wf_model = floris_reinitialise_atmosphere(self.wf_model,\n self.ws,\n self.wd,\n self.ti,\n self.shear)\n wf_pow = floris_calculate_farm_power(self.wf_model, yaw_angles)\n obj_function = (-1 * wf_pow / self.wf_pow_noyaw)\n\n # Update evalauation details\n 
self.eval_yaw_angles.append(yaw_angles)\n self.eval_obj_func.append(obj_function)\n self.eval_farm_power.append(wf_pow)\n\n return obj_function\n\n # %% Tuning Dynamic methods\n\n def _tuning_dyn_bool_check(self):\n if self.tuning_dyn_bool and self.by_row_bool:\n err_msg = \"Dynamic tuning not available for optimization by row.\"\n raise Exception(err_msg)\n\n def _tuning_dyn_init_input_handler(self):\n if isinstance(self.tuning_dyn_obj_list, (list, np.ndarray)) is False:\n err_msg = \"TuningDyn objects need to be in a list, even if only one.\"\n raise Exception(err_msg)\n # Check dynamic grouping tuning objects have the same tuning groups\n if hasattr(self.tuning_dyn_obj_list[0], 'grouping_bool'):\n tuning_groups_first = self.tuning_dyn_obj_list[0].tuning_groups\n same_groups = all(obj.tuning_groups == tuning_groups_first\n for obj in self.tuning_dyn_obj_list)\n if same_groups is False:\n err_msg = \"TuningDyn objects have different groupings.\"\n raise Exception(err_msg)\n\n def _tuning_dyn_initialization_check(self):\n if self.tuning_dyn_bool and self.tuning_dyn_initialization is False:\n err_msg = \"Tuning dynamic not initialized. See tuning_dyn_initialize method.\"\n raise Exception(err_msg)" }, { "identifier": "GPWrap", "path": "deasc/gp.py", "snippet": "class GPWrap:\n \"\"\"\n Wrapper class to create, modify and visualise Gaussian Processes for dynamic parameter\n tuning. Currently limited to a single output GP.\n \"\"\"\n\n def __init__(self, parameter_class, parameter_name, dimensions):\n self.parameter_class = parameter_class\n self.parameter_name = parameter_name\n self.dimensions = dimensions\n \"\"\"\n Args\n ----\n parameter_class: string\n Parameter class of the optimal parameter to fit.\n parameter_name: string\n Name of the optimal parameter to fit.\n dimensions: integer\n Dimensions/inputs/variables of the GP.\n \"\"\"\n\n def GP_so(self, yaw_data, param_data, num_restarts=50, noise=0.05):\n \"\"\"\n Construct and returns a single-output (SO) GP for the given input dataset\n (optimal parameter for a given yaw configuration).\n\n Args\n ----\n yaw_data: list of lists\n list of input yaw configurations for which parameter has been tuned\n param_data: list of lists\n for each yaw configuration in yaw_data, list containing the optimal parameter\n num_restarts: int\n number of random starts of the GP hyperparameter tuning optimization\n noise: float\n noise in output prediction. Default is 0.05\n\n Returns\n -------\n m: GPy single-output Gaussian Process model\n \"\"\"\n # Sample check on argument dimension\n if len(yaw_data[0]) != self.dimensions:\n err_msg = (\"Yaw input and GP dimensions do not match\")\n raise Exception(err_msg)\n if len(param_data[0]) != 1:\n err_msg = (\"Single-output GPs only\")\n raise Exception(err_msg)\n\n # Data structure arguments\n yaw_data_GP = np.array(yaw_data)\n param_data_GP = np.array(param_data)\n\n # GP model\n kernel = GPy.kern.RBF(input_dim=self.dimensions, variance=1., lengthscale=1.)\n self.m = GPy.models.GPRegression(yaw_data_GP,\n param_data_GP,\n kernel,\n noise_var=noise)\n\n # Hyperparameter tuning\n self.m.optimize(optimizer=None, # Default lbfgsb\n start=None,\n messages=False,\n max_iters=1000)\n self.m.optimize_restarts(num_restarts=num_restarts)\n return self.m\n\n def GP_so_plot(self, parameter_range_plot, yaw_range_plot):\n \"\"\"\n Plot a single-output (SO) GP model. 
1D and 2D plots are generated for each\n variable combination.\n\n Args\n ----\n parameter_range: tuple\n range of the optimal parameter to plot\n parameter_range: tuple\n range of the yaw variables to plot\n \"\"\"\n # Plotting library choice and defaults values\n GPy.plotting.change_plotting_library('matplotlib')\n GPy.plotting.matplot_dep.defaults.data_2d = {'s': 0,\n 'edgecolors': 'none',\n 'linewidth': 0.0,\n 'cmap': cm.get_cmap('hot'),\n 'alpha': 0.5}\n\n # 1D Plots\n if self.dimensions == 1:\n figure = GPy.plotting.plotting_library().figure(1, 1, figsize=(5, 2.5))\n title = 'GP %s' % (self.parameter_name)\n xlabel = '$\\gamma_{1}$ [deg]'\n ylabel = '$%s_{opt}$' % (self.parameter_name)\n fig = self.m.plot(figure=figure,\n col=1,\n row=1,\n title=title,\n xlabel=xlabel,\n ylabel=ylabel,\n ylim=list(parameter_range_plot),\n legend=False,\n plot_data=True)\n else:\n n_cuts = 3\n slices = np.linspace(yaw_range_plot[0], yaw_range_plot[1], n_cuts)\n figsize = (5*n_cuts, 2.5*self.dimensions)\n figure = GPy.plotting.plotting_library().figure(self.dimensions,\n n_cuts,\n figsize=figsize)\n\n for dim_idx in range(self.dimensions):\n for i, slice_single in zip(range(n_cuts), slices):\n title = \"GP %s - $\\gamma_{others}$\" \\\n \"%.1f $^{\\circ}$\" % (self.parameter_name, slice_single)\n xlabel = '$\\gamma_{%i}$ [deg]' % (dim_idx+1)\n ylabel = '$%s_{opt}$' % (self.parameter_name)\n inputs = []\n for j in range(self.dimensions):\n if j == dim_idx:\n pass\n else:\n inputs.append((j, slice_single))\n fig = self.m.plot(figure=figure,\n col=(i+1),\n row=(dim_idx+1),\n fixed_inputs=inputs,\n title=title,\n xlabel=xlabel,\n ylabel=ylabel,\n ylim=list(parameter_range_plot),\n legend=False,\n plot_data=False)\n\n # 2D Plots\n # Countours are fine ##\n # Data points (training) plotted are off ##\n # double checked with GP and training database ##\n if self.dimensions == 1:\n pass\n elif self.dimensions == 2:\n figure = GPy.plotting.plotting_library().figure(1, 1, figsize=(3, 2.5))\n\n title = 'GP %s' % (self.parameter_name)\n xlabel = '$\\gamma_{1}$ [deg]'\n ylabel = '$\\gamma_{2}$ [deg]'\n\n fig = self.m.plot(figure=figure,\n title=title,\n xlabel=xlabel,\n ylabel=ylabel,\n legend=False,\n plot_data=True)\n\n ax = plt.gca()\n mappable = ax.collections[0]\n cbar = plt.colorbar(mappable)\n # cbar.set_label('$%s_{opt}$'%(self.parameter_name))\n else:\n n_cuts = 3\n slices = np.linspace(yaw_range_plot[0], yaw_range_plot[1], n_cuts)\n plot_rows = self.dimensions-1\n plot_cols = self.dimensions-1\n combinations = list(itertools.combinations(\n list(range(0, self.dimensions)), 2))\n\n figsize = (3*plot_cols*len(slices), 2.5*plot_rows)\n figure = GPy.plotting.plotting_library().figure(plot_rows,\n plot_cols*len(slices),\n figsize=figsize)\n for i, slice_single in zip(range(n_cuts), slices):\n for comb_idx, comb in enumerate(combinations):\n title = 'GP %s - $\\gamma_{others}$' \\\n '%.1f $^{\\circ}$' % (self.parameter_name, slice_single)\n xlabel = '$\\gamma_{%i}$ [deg]' % (comb[0]+1)\n ylabel = '$\\gamma_{%i}$ [deg]' % (comb[1]+1)\n inputs = []\n for j in range(self.dimensions):\n if j in comb:\n pass\n else:\n inputs.append((j, slice_single))\n\n fig = self.m.plot(figure=figure,\n col=(comb[0]+1+plot_cols*i),\n row=(comb[1]),\n fixed_inputs=inputs,\n title=title,\n xlabel=xlabel,\n ylabel=ylabel,\n legend=False,\n plot_data=True)\n\n ax = plt.gca()\n mappable = ax.collections[0]\n cbar = plt.colorbar(mappable)\n # cbar.set_label('$%s_{opt}$'%(self.parameter_name))" }, { "identifier": "TuningDyn_Grouping_CI", 
"path": "deasc/tuning_dynamic.py", "snippet": "class TuningDyn_Grouping_CI(TuningDyn, TuningDyn_SharedMethods):\n \"\"\"\n Class for dynamic parameter tuning with grouping using column-independence (CI)\n of turbines within a wind farm.\n \"\"\"\n\n def __init__(self,\n param_class,\n param_name,\n turbines_cols,\n tuning_groups_cols_dict,\n GP_model_cols_dict):\n \"\"\"\n Args\n ----\n param_class: (string) tuning parameter class.\n param_name: (string) tuning parameter name.\n turbine_cols: (list of lists) list of lists, each containing the turbines for\n each column in the effective wind farm layout. List of list even for a single\n turbine column.\n tuning_groups_cols_dict: (dict) dictionary with string keys corresponding to\n column lenghts (e.g., \"5x1\") and corresponding list of lists values with the\n groups of turbines to tune. For each group list, include sublists of turbines\n in the group (turbine naming convention relative to the single column,\n e.g. [[1,2],[3,4]]). All column lenghts to be included, even if tuning\n group is an empty [] list.\n GP_model_cols_dict: (dict) dictionary with string keys corresponding to\n column lenghts (e.g., \"2x1\") and corresponding values are the corresponding\n GPy models for the column length. All column lenghts to be included. For the\n \"1x1\" key, None is acceptable as no tuning is performed.\n \"\"\"\n super().__init__(param_class, param_name)\n # Farm columns info\n self.turbines_cols = turbines_cols\n self.turbines_cols_len = [len(col) for col in self.turbines_cols]\n # Tuning info\n self.tuning_variables_cols_dict = tuning_groups_cols_dict\n self._tuning_groups_cols_dict_check()\n self.tuning_variables_cols = self._get_tuning_groups_cols()\n self.tuning_dimensions_cols = [len(item) for item in self.tuning_variables_cols]\n self.GP_model_cols_dict = GP_model_cols_dict\n self.GP_model_cols = self._get_GP_model_cols()\n # GP dimension check\n for i in range(len(self.turbines_cols)):\n self._GP_dimension_check(self.tuning_dimensions_cols[i],\n self.GP_model_cols[i])\n # Grouping info\n self.tuning_groups_cols = self.tuning_variables_cols\n self.tuning_groups = [x for y in self.tuning_groups_cols for x in y]\n self.grouping_bool = True\n\n @property\n def tuning_turbines(self):\n \"\"\"List of the tuning turbines in the wind farm.\"\"\"\n return [x for sublist in self.tuning_groups for x in sublist]\n\n def wso_compatibility_check(self, wso_obj):\n \"\"\"\n Check compatibility with a WSOpt object.\n\n Args\n ----\n wso_obj: (WSOpt) WSOpt object to which dynamic parameter tuning is added.\n \"\"\"\n self._tuning_turbines_check(wso_obj, self.tuning_turbines)\n self._tuning_groups_check(wso_obj)\n self._turbines_cols_check(wso_obj)\n\n def tune_parameter(self, wso_obj, yaw_angles):\n \"\"\"\n Perform parameter tuning in a WSOpt object.\n\n Args\n ----\n wso_obj: (WSOpt) WSOpt object.\n yaw_angles: (np.ndarray) yaw angles of all turbines in the wind farm.\n\n Returns\n -------\n wf-model_tuned: (WfModel) tuned WfModel to use in the current iteration of the\n wake steering optimisation.\n \"\"\"\n # Extract WSOpt WfModel dictionary and default parameter\n wf_model_dict = floris_extract_object_dict(wso_obj.wf_model)\n default_parameter = floris_extract_parameter(wso_obj.wf_model_dict_original,\n self.param_class,\n self.param_name)\n\n # Create tuned parameter list\n opt_param_list = [0]*wso_obj.wf_model.n_turbs\n for i, tuning_variables in enumerate(self.tuning_variables_cols):\n # If no group to tune in the column, assign default non-tuned 
value\n if len(tuning_variables[0]) == 0:\n opt_param_list = self._fill_opt_param_list(opt_param_list,\n self.turbines_cols[i],\n default_parameter)\n # Tune parameter for the each column\n else:\n GP_input = self._get_GP_input_groups(tuning_variables, yaw_angles)\n GP_model = self.GP_model_cols[i]\n mu, var, = GP_model.predict_noiseless(np.array([GP_input]))\n optimal_parameter = mu[0][0]\n opt_param_list = self._fill_opt_param_list(opt_param_list,\n self.turbines_cols[i],\n optimal_parameter)\n # Apply tuned parameter list\n wf_model_dict_tuned = floris_param_change_object_dict(wf_model_dict,\n self.param_class,\n self.param_name,\n opt_param_list)\n wf_model_tuned = floris_param_change_object(wso_obj.wf_model,\n wf_model_dict_tuned)\n return wf_model_tuned\n\n def set_yaw_groups(self, yaw_angles):\n \"\"\"\n Force yaw angles of turbines in tuning groups to be equal in the wake\n steering optimisation.\n\n Args\n ----\n yaw_angles: (np.ndarray) yaw angles of all turbines in the wind farm.\n\n Returns\n -------\n yaw_angles_grouped: (np.ndarray) yaw angles of all turbines in the wind farm with\n equal yaw angles in each turbine group.\n \"\"\"\n return self._set_yaw_groups(yaw_angles)\n\n # Private methods\n def _tuning_groups_cols_dict_check(self):\n for key in self.tuning_variables_cols_dict.keys():\n col_len = int(key[0])\n tuning_groups_loc = self.tuning_variables_cols_dict[key]\n if len(tuning_groups_loc) > col_len:\n err_msg = \"Too many groups specified in tuning turbines dictionary.\"\n raise Exception(err_msg)\n for group_loc in tuning_groups_loc:\n if len(group_loc) > col_len:\n err_msg = \"Too many turbines specified in tuning groups dictionary.\"\n raise Exception(err_msg)\n for turbine_loc in group_loc:\n if turbine_loc > col_len:\n err_msg = \"Turbine specified outside of column.\"\n raise Exception(err_msg)\n if turbine_loc < STARTING_INDEX:\n err_msg = \"Turbine/row counting convention starts from 1.\"\n raise Exception(err_msg)\n\n def _get_tuning_groups_cols(self):\n tuning_groups_cols = []\n for turbines in self.turbines_cols:\n tuning_groups = []\n key = \"%ix1\" % (len(turbines))\n tuning_groups_loc = self.tuning_variables_cols_dict[key]\n for group_loc in tuning_groups_loc:\n tuning_turbines = []\n for turbine_loc in group_loc:\n turbine_idx = turbine_loc-STARTING_INDEX\n tuning_turbines.append(turbines[turbine_idx])\n tuning_groups.append(tuning_turbines)\n tuning_groups_cols.append(tuning_groups)\n return tuning_groups_cols" }, { "identifier": "floris_extract_object_dict", "path": "deasc/utils_floris.py", "snippet": "def floris_extract_object_dict(wf_model):\n \"\"\"Extract and return the current FLORIS object dictionary.\"\"\"\n return wf_model.interface.floris.as_dict()" }, { "identifier": "floris_param_change_object_dict", "path": "deasc/utils_floris.py", "snippet": "def floris_param_change_object_dict(wf_model_dict, param_class, param_name, param_value):\n \"\"\"\n Change FLORIS object with a new model parameter, return new FLORIS object dictionary.\n FLORIS object is not reinitialised (see function floris_parameter_change_object).\n \"\"\"\n wf_model_dict_new = copy.deepcopy(wf_model_dict)\n models_dict = floris_extract_models_dict(wf_model_dict_new)\n (wf_model_dict_new['wake'][param_class]\n [models_dict[param_class]][param_name]) = param_value\n return wf_model_dict_new" }, { "identifier": "floris_param_change_object", "path": "deasc/utils_floris.py", "snippet": "def floris_param_change_object(wf_model, wf_model_dict_new):\n \"\"\"Change FLORIS object with 
new object dictionary. Also reinitialise farm layout.\"\"\"\n x_reinit, y_reinit = wf_model.interface.get_turbine_layout()\n wf_model.interface = FI(wf_model_dict_new)\n wf_model.interface.reinitialize(layout_x=x_reinit, layout_y=y_reinit)\n return wf_model" } ]
import numpy as np
from deasc import WfModel
from deasc import WSOpt
from deasc import GPWrap
from deasc import TuningDyn_Grouping_CI
from deasc.utils_floris import (
    floris_extract_object_dict,
    floris_param_change_object_dict,
    floris_param_change_object
)
12,695
""" This example shows wake steering optimisation on a 5x5 wind farm of NREL 5 MW turbines. Dynamic parameter tuning with grouping and column-independence is introduced in the optimisation for the wake expansion parameter k of the Jensen wake model. The tuning variables for each column are the yaw angles two most upstream groups, each of two turbines. """ # Initialise and set layout for wind farm model path = "./inputs/" input_file = "jensen.yaml" wf_model = WfModel(input_file, path) wf_model.set_aligned_layout(5, 5, 7, 5) # Set kd deflection parameter wf_model_dict = floris_extract_object_dict(wf_model) wf_model_dict = floris_param_change_object_dict(wf_model_dict, 'wake_deflection_parameters', 'kd', 0.3) wf_model = floris_param_change_object(wf_model, wf_model_dict) # Specify atmopheric conditions ws = 8.0 wd = 270 ti = 0.05 shear = 0.0 # Wake steering optimisation inputs yaw_initial = np.full(shape=(25), fill_value=0) inflow = (yaw_initial, wd, ws, ti, shear) variables = [int(x) for x in np.linspace(1, 20, 20)] var_bounds = (-25, 25) var_initial = np.full(shape=(len(variables)), fill_value=0) # %% Dynamic tuning object based on a single farm column # Parameter info parameter_class = 'wake_velocity_parameters' parameter_name = 'we' # Import optimal parameter dataset and extract GP input dataset_path = "./optimal_parameter_datasets/" dataset_import = np.load(dataset_path+'we_5x1_2dim_grouping.npy', allow_pickle=True) optimal_parameter_dataset = dataset_import.item() yaw_data = [] param_data = [] for key in optimal_parameter_dataset.keys(): yaw_data.append([key[0], key[2]]) # Extract group yaw param_data.append([optimal_parameter_dataset[key]]) # Construct Gaussian Process (GP) GP_obj = GPWrap(parameter_class=parameter_class, parameter_name=parameter_name, dimensions=2) GP_model = GP_obj.GP_so(yaw_data, param_data, num_restarts=100, noise=0.05) # Tuning object initialisation tuning_turbines_cols_dict = {}
""" This example shows wake steering optimisation on a 5x5 wind farm of NREL 5 MW turbines. Dynamic parameter tuning with grouping and column-independence is introduced in the optimisation for the wake expansion parameter k of the Jensen wake model. The tuning variables for each column are the yaw angles two most upstream groups, each of two turbines. """ # Initialise and set layout for wind farm model path = "./inputs/" input_file = "jensen.yaml" wf_model = WfModel(input_file, path) wf_model.set_aligned_layout(5, 5, 7, 5) # Set kd deflection parameter wf_model_dict = floris_extract_object_dict(wf_model) wf_model_dict = floris_param_change_object_dict(wf_model_dict, 'wake_deflection_parameters', 'kd', 0.3) wf_model = floris_param_change_object(wf_model, wf_model_dict) # Specify atmopheric conditions ws = 8.0 wd = 270 ti = 0.05 shear = 0.0 # Wake steering optimisation inputs yaw_initial = np.full(shape=(25), fill_value=0) inflow = (yaw_initial, wd, ws, ti, shear) variables = [int(x) for x in np.linspace(1, 20, 20)] var_bounds = (-25, 25) var_initial = np.full(shape=(len(variables)), fill_value=0) # %% Dynamic tuning object based on a single farm column # Parameter info parameter_class = 'wake_velocity_parameters' parameter_name = 'we' # Import optimal parameter dataset and extract GP input dataset_path = "./optimal_parameter_datasets/" dataset_import = np.load(dataset_path+'we_5x1_2dim_grouping.npy', allow_pickle=True) optimal_parameter_dataset = dataset_import.item() yaw_data = [] param_data = [] for key in optimal_parameter_dataset.keys(): yaw_data.append([key[0], key[2]]) # Extract group yaw param_data.append([optimal_parameter_dataset[key]]) # Construct Gaussian Process (GP) GP_obj = GPWrap(parameter_class=parameter_class, parameter_name=parameter_name, dimensions=2) GP_model = GP_obj.GP_so(yaw_data, param_data, num_restarts=100, noise=0.05) # Tuning object initialisation tuning_turbines_cols_dict = {}
tuning_dyn_obj = TuningDyn_Grouping_CI(param_class=parameter_class,
3
2023-11-10 18:13:27+00:00
16k
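For readers unfamiliar with the GPy calls wrapped by the GPWrap.GP_so snippet in this record's context, the following minimal sketch fits the same kind of single-output GP directly and queries it the way tune_parameter does. The yaw/parameter values are illustrative assumptions, not data from the record.

import numpy as np
import GPy

# Toy training set (assumed values): two group yaw angles [deg] -> optimal wake-expansion parameter
yaw_data = np.array([[0.0, 0.0], [10.0, 5.0], [20.0, 10.0], [25.0, 15.0]])
param_data = np.array([[0.05], [0.06], [0.07], [0.075]])

# RBF kernel and GP regression, mirroring GPWrap.GP_so above
kernel = GPy.kern.RBF(input_dim=2, variance=1.0, lengthscale=1.0)
m = GPy.models.GPRegression(yaw_data, param_data, kernel, noise_var=0.05)
m.optimize_restarts(num_restarts=5, verbose=False)

# Noiseless prediction for a new yaw configuration, as used in TuningDyn_Grouping_CI.tune_parameter
mu, var = m.predict_noiseless(np.array([[15.0, 7.5]]))
print(mu[0][0], var[0][0])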
OpenBMB/XAgent
command.py
[ { "identifier": "XAgentServerEnv", "path": "XAgentServer/application/core/envs.py", "snippet": "class XAgentServerEnv:\n \"\"\"\n XAgentServer environment variables\n if you change value of the environment variable, you need to restart \n the XAgentServer by running the following command:\n `python start_server.py`\n or start a unicorn server by yourself\n \"\"\"\n app = \"app:app\"\n prod: bool = config.get(\"PROD\", \"False\").lower() == \"true\"\n base_dir = \"XAgentServer\"\n use_redis: bool = False\n recorder_root_dir = \"running_records\"\n # you can set default_login with True,\n # use the default user \"admin\" with token \"xagent-admin\" to login,\n default_login: bool = True\n # only one XAgentServer can be set to check whether the interaction is running.\n check_running: bool = False\n host = \"0.0.0.0\"\n port = 8090\n debug = True\n reload = True\n workers = 1\n share_url = \"https://x-agent.net/api/conv/community\"\n\n class DB:\n \"\"\"\n database config\n \"\"\"\n use_db = True\n db_url = \"mysql+pymysql://root:xagent@localhost:3306/xagent\"\n\n class Redis:\n \"\"\"\n redis config\n \"\"\"\n use_redis = False\n redis_url = \"redis://localhost\"\n redis_host = \"localhost\"\n redis_port = 6379\n redis_db = 0\n redis_password = \"xagent\"\n\n # if you want to use email to send message,\n # you can set send_email to True and set\n # email_host,\n # email_port,\n # email_user,\n # email_password,\n # auth_server\n class Email:\n \"\"\"\n email config\n \"\"\"\n send_email = False\n email_host = \"\"\n email_port = 465\n email_user = \"\"\n email_password = \"\"\n auth_server = \"\"\n\n # if you want to use upload function,\n # you can set upload_dir to the path of the upload directory\n # and set upload_allowed_types of the allowed types\n class Upload:\n \"\"\"\n upload config\n \"\"\"\n upload_dir = \"XAgentServer/localstorage/upload\"\n if not os.path.exists(upload_dir):\n os.makedirs(upload_dir)\n upload_allowed_types = [\"image/png\", \"image/jpeg\",\n \"image/gif\", \"text/plain\",\n \"application/msword\", \"pdf\",\n \"txt\", \"pptx\", \"xlsx\",\n \"doc\", \"ppt\", \"xls\",\n \"zip\", \"rar\", \"tar\",\n \"gz\", \"7z\", \"bz2\",\n \"tgz\", \"tbz2\", \"tar.gz\",\n \"tar.bz2\"]" }, { "identifier": "SessionLocal", "path": "XAgentServer/database/connect.py", "snippet": "SQLALCHEMY_DATABASE_URL = os.getenv('MYSQL_DB_URL', XAgentServerEnv.DB.db_url)" }, { "identifier": "StatusEnum", "path": "XAgentServer/enums/status.py", "snippet": "class StatusEnum:\n \"\"\"XAgent Status Enum\n \"\"\"\n START = \"start\"\n SUBTASK = \"subtask\"\n REFINEMENT = \"refinement\"\n INNER = \"inner\"\n FINISHED = \"finished\"\n FAILED = \"failed\"\n SUBMIT = \"subtask_submit\"\n RUNNING = \"running\"\n ASK_FOR_HUMAN_HELP = \"ask_for_human_help\"\n CLOSED = \"closed\"" }, { "identifier": "XAgentError", "path": "XAgentServer/exts/exception_ext.py", "snippet": "class XAgentError(Exception):\n \"\"\"Base class for exceptions in this module.\"\"\"\n def __init__(self, message=\"XAgent Error!\"):\n self.message = message\n super().__init__(self.message)" }, { "identifier": "XAgentInteraction", "path": "XAgentServer/interaction.py", "snippet": "class XAgentInteraction(metaclass=abc.ABCMeta):\n \"\"\"\n XAgent 核心交互组件集, 引用: XAgentCE\n Attributes:\n base: 交互基本信息\n parameter: 交互参数\n interrupt: 是否包含中断\n toolserver: 工具服务\n call_method: 调用方式\n wait_seconds: 等待时间\n \n Components:\n logger: 日志\n db: 数据库\n recorder: 运行记录\n toolserver_interface: 工具服务接口\n \n 组件集中的所有组件全局唯一\n\n \"\"\"\n\n def __init__(\n self,\n 
base: InteractionBase,\n parameter: InteractionParameter,\n interrupt: bool = False,\n call_method: str = \"web\",\n wait_seconds: int = 600,\n ) -> None:\n self.base = base\n self.parameter = parameter\n # 唯一标识当前的执行步骤\n self.current_step = uuid.uuid4().hex\n self.logger = None\n self.interrupt = interrupt\n self.call_method = call_method\n self.wait_seconds = wait_seconds\n self.log_dir = os.path.join(\n os.path.join(XAgentServerEnv.base_dir,\n \"localstorage\",\n \"interact_records\"),\n datetime.now().strftime(\"%Y-%m-%d\"),\n self.base.interaction_id)\n self.human_data = None\n if not os.path.exists(self.log_dir):\n os.makedirs(self.log_dir)\n\n self.extract_dir = os.path.join(self.log_dir, \"workspace\")\n if not os.path.exists(self.extract_dir):\n os.makedirs(self.extract_dir)\n\n self.db: Session = None\n self.toolserver_interface = None\n\n def register_toolserver_interface(self, toolserver_interface: ToolServerInterface):\n \"\"\"register tool server interface\"\"\"\n self.toolserver_interface = toolserver_interface\n\n def resister_logger(self, logger: Logger):\n \"\"\"\n 注册logger, 根据会话id创建日志文件夹, 并创建日志文件\n \"\"\"\n\n self.logger = logger\n self.logger.info(f\"init interaction: {self.base.interaction_id}\")\n\n def register_db(self, db: Session):\n \"\"\"\n 注册db\n\n Args:\n db: Session对象\n \"\"\"\n self.db = db\n\n def insert_data(self,\n data: dict,\n status=\"\",\n current: str = None,\n is_include_pictures: bool = False,):\n \"\"\"\n 更新缓存, 推送数据\n \"\"\"\n # check alive\n alive = redis.get_key(self.base.interaction_id)\n if alive == \"close\":\n self.logger.info(\"The user terminated this action and exited.\")\n exit(0)\n self.current_step = uuid.uuid4().hex\n\n if status == \"inner\":\n tool_name = data.get(\"using_tools\", {}).get(\n \"tool_name\", \"\") if isinstance(data, dict) else \"\"\n\n if tool_name == \"subtask_submit\":\n status = StatusEnum.SUBMIT\n\n # download workspace files\n self.download_files()\n\n file_list = os.listdir(self.extract_dir)\n\n # insert raw\n process = XAgentRaw(\n node_id=self.current_step,\n interaction_id=self.base.interaction_id,\n current=current,\n step=0,\n data=data,\n file_list=file_list,\n status=status,\n do_interrupt=self.interrupt,\n wait_seconds=0,\n ask_for_human_help=False,\n create_time=datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),\n update_time=datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),\n is_deleted=False,\n is_human=False,\n human_data=None,\n human_file_list=[],\n is_send=self.call_method != 'web',\n is_receive=False,\n include_pictures=is_include_pictures,\n )\n if status == StatusEnum.FINISHED:\n InteractionCRUD.update_interaction_status(\n db=self.db,\n interaction_id=self.base.interaction_id,\n status=StatusEnum.FINISHED,\n message=\"finished\",\n current_step=self.current_step)\n else:\n InteractionCRUD.update_interaction_status(\n db=self.db,\n interaction_id=self.base.interaction_id,\n status=\"running\",\n message=\"running\",\n current_step=self.current_step)\n InteractionCRUD.insert_raw(db=self.db, process=process)\n if self.call_method == \"web\":\n redis.set_key(self.base.interaction_id + \"_send\", 1)\n elif self.call_method == \"cmd\":\n # print workspace file list\n file_list_str = \", \".join(file_list) \n self.logger.typewriter_log(\n title=f\"-=-=-=-=-=-=-= {self.base.interaction_id}, {self.current_step}, WORKSPACE FILE LIST -=-=-=-=-=-=-=\\n\",\n title_color=Fore.GREEN,\n content=f\"[{file_list_str}] in {self.extract_dir}\"\n )\n\n def download_files(self):\n \"\"\"download files\n\n Returns:\n 
Boolean: True or False\n \"\"\"\n try:\n save_path = self.toolserver_interface.download_all_files()\n\n if os.path.exists(save_path):\n zip_file = zipfile.ZipFile(save_path)\n zip_list = zip_file.namelist() # 得到压缩包里所有文件\n for f in zip_list:\n zip_file.extract(f, self.extract_dir) # 循环解压文件到指定目录\n\n zip_file.close()\n return True\n except zipfile.BadZipFile:\n return False\n\n def receive(self, can_modify=None):\n \"\"\"\n 接收数据\n \"\"\"\n\n if self.call_method == \"web\":\n wait = 0\n while wait < self.wait_seconds:\n human_data = self.get_human_data()\n if human_data is not None:\n return human_data\n else:\n wait += 2\n time.sleep(2)\n\n raise XAgentTimeoutError(\"等待数据超时,关闭连接\")\n else:\n print(can_modify)\n\n def get_human_data(self):\n \"\"\"\n 获取人类数据\n \"\"\"\n # check alive, ensure the interaction is alive\n # if The user terminated this action and exited\n alive = redis.get_key(self.base.interaction_id)\n if alive == \"close\":\n self.logger.info(\"The user terminated this action and exited!\")\n exit(0)\n receive_key = self.base.interaction_id + \"_\" + self.current_step + \"_receive\"\n is_receive = redis.get_key(receive_key)\n\n if is_receive:\n raw = InteractionCRUD.get_raw(\n db=self.db, interaction_id=self.base.interaction_id, node_id=self.current_step)\n\n if raw and raw.is_human and raw.is_receive:\n redis.delete_key(receive_key)\n return raw.human_data\n\n return None\n\n def ask_for_human_help(self, data):\n \"\"\"调用工具时,请求人类帮助\n Execute the tool and ask for human help\n \"\"\"\n\n self.current_step = uuid.uuid4().hex\n self.download_files()\n file_list = os.listdir(self.extract_dir)\n # special: ask for human help and do interrupt\n # send data\n process = XAgentRaw(\n node_id=self.current_step,\n interaction_id=self.base.interaction_id,\n current=self.current_step,\n step=0,\n data=data,\n file_list=file_list,\n status=StatusEnum.ASK_FOR_HUMAN_HELP,\n do_interrupt=True,\n wait_seconds=0,\n ask_for_human_help=True,\n create_time=datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),\n update_time=datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),\n is_deleted=False,\n is_human=False,\n human_data=None,\n human_file_list=[],\n is_send=False,\n is_receive=False,\n include_pictures=False,\n )\n\n # insert into mysql\n InteractionCRUD.insert_raw(db=self.db, process=process)\n\n # set redis\n redis.set_key(self.base.interaction_id + \"_send\", 1)\n\n # set status\n\n InteractionCRUD.update_interaction_status(\n db=self.db,\n interaction_id=self.base.interaction_id,\n status=StatusEnum.ASK_FOR_HUMAN_HELP,\n message=\"ask for human help\",\n current_step=self.current_step)\n\n # check alive\n alive = redis.get_key(self.base.interaction_id)\n if alive == \"close\":\n self.logger.info(\"The user terminated this action and exited!\")\n exit(0)\n\n # wait for human data\n wait = 0\n while wait < self.wait_seconds:\n human_data = self.get_human_data()\n if human_data is not None:\n return human_data\n else:\n wait += 2\n time.sleep(2)\n\n raise XAgentTimeoutError(\"ASK-For-Human-Data: 等待数据超时,关闭连接\")" }, { "identifier": "Logger", "path": "XAgentServer/loggers/logs.py", "snippet": "class Logger(metaclass=abc.ABCMeta):\n \"\"\"\n Logger that handle titles in different colors.\n Outputs logs in console, activity.log, and errors.log\n For console handler: simulates typing\n \"\"\"\n\n def __init__(self, log_dir: str = None, log_name: str= \"\", log_file: str = \"activity.log\", error_file: str = \"errors.log\"):\n \"\"\"init\"\"\"\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n\n # create 
log directory if it doesn't exist\n self.log_name = time.strftime(\"%Y-%m-%d\", time.localtime()) if not log_name else log_name\n self.logger = logging.getLogger(self.log_name)\n console_formatter = RecordFormatter(\"%(title_color)s %(message)s\")\n\n # Create a handler for console which simulate typing\n self.typing_console_handler = TypingConsoleHandler()\n self.typing_console_handler.setLevel(logging.INFO)\n self.typing_console_handler.setFormatter(console_formatter)\n\n # Create a handler for console without typing simulation\n self.console_handler = ConsoleHandler()\n self.console_handler.setLevel(logging.DEBUG)\n self.console_handler.setFormatter(console_formatter)\n\n self.speak_mode = False\n self.chat_plugins = []\n\n # Info handler in activity.log\n self.file_handler = logging.FileHandler(\n os.path.join(log_dir, log_file), \"a\", \"utf-8\"\n )\n self.file_handler.setLevel(logging.DEBUG)\n info_formatter = RecordFormatter(\n \"%(asctime)s [%(threadName)s] %(levelname)s: %(title_color)s %(title)s %(message)s\"\n )\n self.file_handler.setFormatter(info_formatter)\n\n # Error handler error.log\n error_handler = logging.FileHandler(\n os.path.join(log_dir, error_file), \"a\", \"utf-8\"\n )\n error_handler.setLevel(logging.ERROR)\n error_formatter = RecordFormatter(\n \"%(asctime)s [%(threadName)s] %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title_color)s %(title)s\"\n \" %(message_no_color)s\"\n )\n error_handler.setFormatter(error_formatter)\n\n # self.typing_logger = logging.getLogger(self.log_name)\n # if not self.typing_logger.handlers:\n # self.typing_logger.addHandler(self.typing_console_handler)\n # self.typing_logger.addHandler(self.file_handler)\n # self.typing_logger.addHandler(error_handler)\n # self.typing_logger.setLevel(logging.DEBUG)\n\n if self.log_name.endswith(\"_INTERACT\") or not self.logger.handlers:\n # self.logger.addHandler(self.typing_console_handler)\n self.logger.addHandler(self.console_handler)\n self.logger.addHandler(error_handler)\n self.logger.addHandler(self.file_handler)\n self.logger.setLevel(logging.DEBUG)\n \n def typewriter_log(\n self, title=\"\", title_color=\"\", content=\"\", speak_text=False, level=logging.INFO\n ):\n # if speak_text and self.speak_mode:\n # say_text(f\"{title}. {content}\")\n\n for plugin in self.chat_plugins:\n plugin.report(f\"{title}. 
{content}\")\n\n if content:\n if isinstance(content, list):\n content = \" \".join(content)\n else:\n content = \"\"\n\n self.logger.log(\n level, content, extra={\"title\": title, \"color\": title_color}\n )\n\n def debug(\n self,\n message,\n title=\"\",\n title_color=\"\",\n ):\n self._log(title, title_color, message, logging.DEBUG)\n\n def info(\n self,\n message,\n title=\"\",\n title_color=\"\",\n ):\n self._log(title, title_color, message, logging.INFO)\n\n def warn(\n self,\n message,\n title=\"\",\n title_color=\"\",\n ):\n self._log(title, title_color, message, logging.WARN)\n\n def error(self, title, message=\"\"):\n self._log(title, Fore.RED, message, logging.ERROR)\n\n def _log(\n self,\n title: str = \"\",\n title_color: str = \"\",\n message: str = \"\",\n level=logging.INFO,\n ):\n if message:\n if isinstance(message, list):\n message = \" \".join(message)\n self.logger.log(\n level, message, extra={\"title\": str(title), \"color\": str(title_color)}\n )\n\n def set_level(self, level):\n self.logger.setLevel(level)\n self.typing_logger.setLevel(level)\n\n def double_check(self, additionalText=None):\n if not additionalText:\n additionalText = (\n \"Please ensure you've setup and configured everything\"\n \" correctly. Read https://github.com/Torantulino/Auto-GPT#readme to \"\n \"double check. You can also create a github issue or join the discord\"\n \" and ask there!\"\n )\n\n self.typewriter_log(\"DOUBLE CHECK CONFIGURATION\", Fore.YELLOW, additionalText)\n\n def log_json(self, data: Any, file_name: str) -> None:\n # Define log directory\n this_files_dir_path = os.path.dirname(__file__)\n log_dir = os.path.join(this_files_dir_path, \"../logs\")\n\n # Create a handler for JSON files\n json_file_path = os.path.join(log_dir, file_name)\n json_data_handler = JsonFileHandler(json_file_path)\n json_data_handler.setFormatter(JsonFormatter())\n\n # Log the JSON data using the custom file handler\n self.json_logger.addHandler(json_data_handler)\n self.json_logger.debug(data)\n self.json_logger.removeHandler(json_data_handler)\n\n def get_log_directory(self):\n this_files_dir_path = os.path.dirname(__file__)\n log_dir = os.path.join(this_files_dir_path, \"../logs\")\n return os.path.abspath(log_dir)" }, { "identifier": "InteractionBase", "path": "XAgentServer/models/interaction.py", "snippet": "class InteractionBase(metaclass=abc.ABCMeta):\n def __init__(self,\n interaction_id: str,\n user_id: str,\n create_time: str,\n description: str,\n agent: str = \"\",\n mode: str = \"\",\n file_list: list = [],\n recorder_root_dir: str = \"\",\n status: str = \"\",\n message: str = \"\",\n current_step: str = \"\",\n update_time: str = \"\",\n is_deleted: bool = False,\n call_method: str = \"web\",\n ):\n self.interaction_id = interaction_id\n self.user_id = user_id\n self.create_time = create_time\n self.description = description\n self.agent = agent\n self.mode = mode\n self.file_list = file_list\n self.recorder_root_dir = recorder_root_dir\n self.status = status\n self.message = message\n self.current_step = current_step\n self.update_time = update_time\n self.is_deleted = is_deleted\n self.call_method = call_method\n\n def to_dict(self, include=None, exclude=None):\n data = {\n \"interaction_id\": self.interaction_id,\n \"user_id\": self.user_id,\n \"create_time\": self.create_time,\n \"description\": self.description,\n \"agent\": self.agent,\n \"mode\": self.mode,\n \"file_list\": self.file_list,\n \"recorder_root_dir\": self.recorder_root_dir,\n \"status\": self.status,\n 
\"message\": self.message,\n \"current_step\": self.current_step,\n \"update_time\": self.update_time,\n \"is_deleted\": self.is_deleted,\n \"call_method\": self.call_method,\n }\n if include:\n data = {k: v for k, v in data.items() if k in include}\n if exclude:\n data = {k: v for k, v in data.items() if k not in exclude}\n return data\n \n def to_json(self):\n return json.dumps(self.to_dict(), indent=2, ensure_ascii=False)\n \n @classmethod\n def from_json(cls, json_data):\n return cls(**json_data)\n \n @classmethod\n def from_db(cls, interaction):\n return cls(interaction.interaction_id,\n interaction.user_id,\n interaction.create_time,\n interaction.description,\n interaction.agent,\n interaction.mode,\n interaction.file_list,\n interaction.recorder_root_dir,\n interaction.status,\n interaction.message,\n interaction.current_step,\n interaction.update_time,\n interaction.is_deleted,\n interaction.call_method,\n )" }, { "identifier": "InteractionParameter", "path": "XAgentServer/models/parameter.py", "snippet": "class InteractionParameter(metaclass=abc.ABCMeta):\n \"\"\"\n 交互参数\n \"\"\"\n\n def __init__(self,\n interaction_id: str,\n parameter_id: str,\n args: Union[str, dict, None] = None\n ):\n self.interaction_id = interaction_id\n self.args = args\n self.parameter_id = parameter_id\n\n def to_dict(self):\n return {\n \"interaction_id\": self.interaction_id,\n \"parameter_id\": self.parameter_id,\n \"args\": self.args,\n }\n\n def to_json(self):\n return json.dumps(self.to_dict(), indent=2, ensure_ascii=False)\n\n @classmethod\n def from_json(cls, json_data):\n return cls(**json_data)\n \n @classmethod\n def from_db(cls, interaction):\n return cls(interaction.interaction_id,\n interaction.parameter_id,\n interaction.args\n )" }, { "identifier": "XAgentRaw", "path": "XAgentServer/models/raw.py", "snippet": "class XAgentRaw(metaclass=abc.ABCMeta):\n \"\"\"XAgent Raw Object\"\"\"\n\n def __init__(self, node_id: str,\n interaction_id: str,\n current: str,\n step: int,\n data: dict,\n file_list: list,\n status: str,\n do_interrupt: bool,\n wait_seconds: int,\n ask_for_human_help: bool,\n create_time: str,\n update_time: str,\n is_deleted: bool,\n is_human: bool,\n human_data: dict,\n human_file_list: list,\n is_send: bool,\n is_receive: bool,\n include_pictures: bool = False,):\n self.node_id = node_id\n self.interaction_id = interaction_id\n self.current = current\n self.step = step\n self.data = data\n self.file_list = file_list\n self.status = status\n self.do_interrupt = do_interrupt\n self.wait_seconds = wait_seconds\n self.ask_for_human_help = ask_for_human_help\n self.create_time = create_time\n self.update_time = update_time\n self.is_deleted = is_deleted\n self.is_human = is_human\n self.human_data = human_data\n self.human_file_list = human_file_list\n self.is_send = is_send\n self.is_receive = is_receive\n self.include_pictures = include_pictures\n\n def to_dict(self):\n \"\"\"XAgent Raw Object to dict\"\"\"\n return {\n \"node_id\": self.node_id,\n \"interaction_id\": self.interaction_id,\n \"current\": self.current,\n \"step\": self.step,\n \"data\": self.data,\n \"file_list\": self.file_list,\n \"status\": self.status,\n \"do_interrupt\": self.do_interrupt,\n \"wait_seconds\": self.wait_seconds,\n \"ask_for_human_help\": self.ask_for_human_help,\n \"create_time\": self.create_time,\n \"update_time\": self.update_time,\n \"is_deleted\": self.is_deleted,\n \"is_human\": self.is_human,\n \"human_data\": self.human_data,\n \"human_file_list\": self.human_file_list,\n 
\"is_send\": self.is_send,\n \"is_receive\": self.is_receive,\n \"include_pictures\": self.include_pictures\n }\n\n def to_json(self):\n \"\"\"XAgent Raw Object to json\"\"\"\n return json.dumps(self.to_dict(), indent=2, ensure_ascii=False)\n\n @classmethod\n def from_json(cls, json_data):\n \"\"\"XAgent Raw Object from json\"\"\"\n return cls(**json_data)\n\n def update(self, update_data: dict):\n \"\"\"XAgent Raw Object update\"\"\"\n for k, v in update_data.items():\n setattr(self, k, v)\n return self\n\n @classmethod\n def from_db(cls, db_data):\n \"\"\"XAgent Raw Object from db\"\"\"\n return cls(\n node_id=db_data.node_id,\n interaction_id=db_data.interaction_id,\n current=db_data.current,\n step=db_data.step,\n data=db_data.data,\n file_list=db_data.file_list,\n status=db_data.status,\n do_interrupt=db_data.do_interrupt,\n wait_seconds=db_data.wait_seconds,\n ask_for_human_help=db_data.ask_for_human_help,\n create_time=db_data.create_time,\n update_time=db_data.update_time,\n is_deleted=db_data.is_deleted,\n is_human=db_data.is_human,\n human_data=db_data.human_data,\n human_file_list=db_data.human_file_list,\n is_send=db_data.is_send,\n is_receive=db_data.is_receive,\n include_pictures=db_data.include_pictures\n )" }, { "identifier": "XAgentServer", "path": "XAgentServer/server.py", "snippet": "class XAgentServer:\n \"\"\"XAgent Server Start Class\n \"\"\"\n\n def __init__(self, logger: Logger) -> None:\n self.logger: Logger = logger\n\n def interact(self, interaction: XAgentInteraction):\n # query = message\n \"\"\"\n XAgent Server Start Function\n \"\"\"\n from XAgent.config import CONFIG as config\n xagent_core = None\n try:\n config.reload()\n args = {}\n # args\n args = interaction.parameter.args\n\n self.logger.info(\n f\"server is running, the start query is {args.get('goal', '')}\")\n xagent_param = XAgentParam()\n\n # build query\n xagent_param.build_query({\n \"role_name\": \"Assistant\",\n \"task\": args.get(\"goal\", \"\"),\n \"plan\": args.get(\"plan\", [\"Pay attention to the language in initial goal, always answer with the same language of the initial goal given.\"]),\n })\n xagent_param.build_config(config)\n xagent_core = XAgentCoreComponents()\n # build XAgent Core Components\n xagent_core.build(xagent_param, interaction=interaction)\n json_str = json.dumps(\n xagent_param.config.to_dict(), indent=2)\n json_str=re.sub(r'\"api_key\": \"(.+?)\"', r'\"api_key\": \"**\"', json_str)\n self.logger.info(json_str)\n self.logger.typewriter_log(\n \"Human-In-The-Loop\",\n Fore.RED,\n str(xagent_param.config.enable_ask_human_for_help),\n )\n\n file_list = interaction.base.file_list\n for file in file_list:\n file_uuid = file.get(\"uuid\", \"\")\n file_name = file.get(\"name\", \"\")\n if file_uuid.startswith(\"/\"):\n file_path = file_uuid\n else:\n file_path = os.path.join(XAgentServerEnv.Upload.upload_dir,\n interaction.base.user_id, file_uuid)\n\n upload_dir = os.path.join(\n xagent_core.base_dir, \"upload\")\n if not os.path.exists(upload_dir):\n os.makedirs(upload_dir)\n # 拷贝到workspace\n if interaction.call_method == \"web\":\n shutil.copy(file_path, os.path.join(upload_dir, file_name))\n else:\n if os.path.exists(file_path):\n if os.path.samefile(file_path, os.path.join(upload_dir, file_name)):\n # 文件路径相同,跳过复制\n pass\n else:\n shutil.copy(file_path, os.path.join(upload_dir, file_name))\n # shutil.copy(file_path, os.path.join(upload_dir, file_name))\n\n new_file = os.path.join(upload_dir, file_name)\n try:\n xagent_core.toolserver_interface.upload_file(new_file)\n 
except Exception as e:\n self.logger.typewriter_log(\n \"Error happens when uploading file\",\n Fore.RED,\n f\"{new_file}\\n{e}\",\n )\n raise XAgentUploadFileError(str(e)) from e\n\n task_handler = TaskHandler(xagent_core=xagent_core,\n xagent_param=xagent_param)\n self.logger.info(\"Start outer loop async\")\n task_handler.outer_loop()\n except Exception as e:\n raise XAgentRunningError(str(e)) from e\n finally:\n if xagent_core is not None:\n xagent_core.close()" }, { "identifier": "InteractionCRUD", "path": "XAgentServer/application/cruds/interaction.py", "snippet": "class InteractionCRUD(metaclass=abc.ABCMeta):\n \"\"\"\n interaction crud\n \"\"\"\n\n @classmethod\n def search_many_interaction(cls, db: Session) -> list:\n \"\"\"\n search many interaction\n \"\"\"\n try:\n return InteractionDBInterface.search_many_interaction(db=db)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def get_interaction(cls, db: Session, interaction_id: str) -> InteractionBase | None:\n \"\"\"\n get interaction\n Args:\n db: db\n interaction_id: interaction id\n Returns:\n interaction InteractionBase\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n return InteractionDBInterface.get_interaction(db=db, interaction_id=interaction_id)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def create_interaction(cls, db: Session, base: InteractionBase):\n \"\"\"\n create interaction\n Args:\n db: db\n base: base\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n InteractionDBInterface.create_interaction(db=db, base=base)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n \n @classmethod\n def get_ready_interaction(cls, db: Session, user_id: str):\n \"\"\"\n create interaction\n Args:\n db: db\n user_id: user_id\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n return InteractionDBInterface.get_ready_interaction(db=db, user_id=user_id)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n\n @classmethod\n def add_parameter(cls, db: Session, parameter: InteractionParameter = None):\n \"\"\"\n add parameter\n Args:\n db: db\n parameter: parameter\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n InteractionDBInterface.add_parameter(db=db, parameter=parameter)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def get_parameter(cls, db: Session, interaction_id: str) -> list:\n \"\"\"\n get parameter\n Args:\n db: db\n interaction_id: interaction id\n Returns:\n parameter list [InteractionParameter]\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n return InteractionDBInterface.get_parameter(db=db, interaction_id=interaction_id)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n \n \n @classmethod\n def get_init_parameter(cls, db: Session, interaction_id: str) -> InteractionParameter:\n \"\"\"\n get init parameter\n Args:\n db: db\n interaction_id: interaction id\n Returns:\n parameter InteractionParameter\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n parameters = InteractionDBInterface.get_parameter(db=db, interaction_id=interaction_id)\n init_parameter = parameters[0]\n parameter = InteractionParameter.from_json({\"args\": init_parameter, 
\"interaction_id\": interaction_id, \"parameter_id\": None})\n return parameter\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def search_interaction_by_user_id(cls,\n db: Session,\n user_id: str,\n page_size: int = 10,\n page_num: int = 1) -> list[dict]:\n \"\"\"\n get interaction by user id\n Args:\n db: db\n user_id: user id\n page_size: page size\n page_num: page num\n Returns:\n interaction list [dict]\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n return InteractionDBInterface.search_interaction_by_user_id(db=db,\n user_id=user_id,\n page_size=page_size,\n page_num=page_num)\n\n @classmethod\n def is_exist(cls, db: Session, interaction_id: str) -> bool:\n \"\"\"\n interaction is exist\n Args:\n db: db\n interaction_id: interaction id\n Returns:\n True if interaction is exist, else False\n \n Raises:\n XAgentDBError: XAgent DB Error \n \"\"\"\n try:\n return InteractionDBInterface.is_exist(db=db, interaction_id=interaction_id)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def update_interaction(cls, db: Session, base_data: dict):\n \"\"\"\n update interaction\n Args:\n db: db\n base_data: base data\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n InteractionDBInterface.update_interaction(db=db, base_data=base_data)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def update_interaction_status(cls,\n db: Session,\n interaction_id: str,\n status: str,\n message: str,\n current_step: int):\n \"\"\"\n update interaction status\n Args:\n db: db\n interaction_id: interaction id\n status: status\n message: message\n current_step: current step\n \n Raises:\n XAgentDBError: XAgent DB Error \n \"\"\"\n try:\n InteractionDBInterface.update_interaction_status(\n db=db,\n interaction_id=interaction_id,\n status=status,\n message=message,\n current_step=current_step)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def update_interaction_parameter(cls,\n db: Session,\n interaction_id: str,\n parameter: InteractionParameter):\n \"\"\"\n update interaction parameter\n Args:\n db: db\n interaction_id: interaction id\n parameter: parameter\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n InteractionDBInterface.update_interaction_parameter(\n db=db,\n interaction_id=interaction_id,\n parameter=parameter)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def is_running(cls, db: Session, user_id: str):\n \"\"\"\n is running\n Args:\n db: db\n user_id: user id\n Returns:\n True if running, else False\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n return InteractionDBInterface.is_running(db=db, user_id=user_id)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def delete_interaction(cls, db: Session, interaction_id: str):\n \"\"\"\n delete interaction\n Args:\n db: db\n interaction_id: interaction id\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n InteractionDBInterface.delete_interaction(\n db=db, interaction_id=interaction_id)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def get_shared_interaction(cls,\n db: Session,\n 
interaction_id: str) -> InteractionBase | None:\n \"\"\"\n get shared interaction\n Args:\n db: db\n interaction_id: interaction id\n Returns:\n interaction InteractionBase, if not found, return None\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n return InteractionDBInterface.get_shared_interaction(\n db=db,\n interaction_id=interaction_id)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def search_many_shared(cls,\n db: Session,\n page_size: int = 20,\n page_index: int = 1) -> list[dict]:\n \"\"\"\n search many shared\n Args:\n db: db\n page_size: page size\n page_index: page index\n Returns:\n interaction list [dict]\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n return InteractionDBInterface.search_many_shared(db=db,\n page_size=page_size,\n page_index=page_index)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def insert_raw(cls, db: Session, process: XAgentRaw):\n \"\"\"\n insert raw\n Args:\n db: db\n process: process\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n InteractionDBInterface.insert_raw(db=db, process=process)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def search_many_raws(cls, db: Session, interaction_id: str) -> List[XAgentRaw] | None:\n \"\"\"\n search many raws\n Args:\n db: db\n interaction_id: interaction id\n Returns:\n raw list [XAgentRaw]\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n return [XAgentRaw.from_db(raw) for raw in \n InteractionDBInterface.search_many_raws(db=db, interaction_id=interaction_id)]\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def get_raw(cls, db: Session, interaction_id: str, node_id: str) -> XAgentRaw | None:\n \"\"\"\n get raw\n Args:\n db: db\n interaction_id: interaction id\n node_id: node id\n Returns:\n raw XAgentRaw, if not found, return None\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n return InteractionDBInterface.get_raw(db=db,\n interaction_id=interaction_id,\n node_id=node_id)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def get_next_send(cls, db: Session, interaction_id: str) -> List[Raw] | None:\n \"\"\"\n get next send\n Args:\n db: db\n interaction_id: interaction id\n Returns:\n raw list [Raw]\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n return InteractionDBInterface.get_next_send(db=db, interaction_id=interaction_id)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def update_send_flag(cls, db: Session, interaction_id: str, node_id: str):\n \"\"\"\n update send flag\n Args:\n db: db\n interaction_id: interaction id\n node_id: node id\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n InteractionDBInterface.update_send_flag(\n db=db, interaction_id=interaction_id, node_id=node_id)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def update_receive_flag(cls, db: Session, interaction_id: str, node_id: str):\n \"\"\"\n update receive flag\n Args:\n db: db\n interaction_id: interaction id\n node_id: node id\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n 
InteractionDBInterface.update_receive_flag(\n db=db, interaction_id=interaction_id, node_id=node_id)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def update_human_data(cls,\n db: Session,\n interaction_id: str,\n node_id: str,\n human_data: dict):\n \"\"\"\n update human data\n Args:\n db: db\n interaction_id: interaction id\n node_id: node id\n human_data: human data\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n InteractionDBInterface.update_human_data(db=db,\n interaction_id=interaction_id,\n node_id=node_id,\n human_data=human_data)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def insert_error(cls,\n db: Session,\n interaction_id: str,\n message: str,\n status: str = \"failed\"):\n \"\"\"\n insert error\n Args:\n db: db\n interaction_id: interaction id\n message: message\n status: status, default is failed\n Returns:\n raw XAgentRaw\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n process = XAgentRaw(\n node_id=uuid.uuid4().hex,\n interaction_id=interaction_id,\n current=\"\",\n step=0,\n data=message,\n file_list=[],\n status=status,\n do_interrupt=False,\n wait_seconds=0,\n ask_for_human_help=False,\n create_time=datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),\n update_time=datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),\n is_deleted=False,\n is_human=False,\n human_data={},\n human_file_list=[],\n is_send=False,\n is_receive=False,\n include_pictures=False,\n )\n InteractionDBInterface.insert_raw(db=db, process=process)\n return process\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n\n @classmethod\n def add_share(cls, db: Session, share):\n \"\"\"\n add share\n Args:\n db: db\n share: share\n \n Raises:\n XAgentDBError: XAgent DB Error\n \"\"\"\n try:\n InteractionDBInterface.add_share(db=db, shared=share)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e\n \n \n @classmethod\n def get_finish_status(cls, db: Session, interaction_id: str) -> bool:\n \"\"\"\n get finish status\n \n Args:\n db: db\n interaction_id: interaction id\n \n Returns:\n True if finish, else False\n \"\"\"\n try:\n return InteractionDBInterface.get_finish_status(db=db, interaction_id=interaction_id)\n except Exception as e:\n raise XAgentDBError(f\"XAgent DB Error [Interact Module]: {str(e)}\") from e" }, { "identifier": "redis", "path": "XAgentServer/application/global_val.py", "snippet": "def init_yag(logger):\ndef init_executor(logger):" }, { "identifier": "CommandLineInput", "path": "command_input.py", "snippet": "class CommandLineInput:\n \"\"\"\n Class for handling command line input.\n\n This child class extends from BaseInput and implements methods to handle and manage command line input data.\n\n Attributes:\n do_interrupt (bool): If True, input will be interrupted.\n max_wait_seconds (int): Maximum wait time for input in seconds.\n \"\"\"\n def __init__(self,\n do_interrupt: bool = False,\n max_wait_seconds: int = 600,\n logger=None):\n self.do_interrupt = do_interrupt\n self.max_wait_seconds = max_wait_seconds\n self.logger = logger\n\n def run(self, input_data):\n \"\"\"\n Run the command line input method.\n\n Args:\n input_data (Any): The original input data to be processed.\n\n Returns:\n data (Any): The processed input data.\n \"\"\"\n if self.do_interrupt:\n data = self.interrupt(input_data)\n 
else:\n data = input_data\n return data\n \n def get_each_input(self, key, value, res, timeout):\n \"\"\"\n Returns the input from the command line for a single key-value pair.\n\n Args:\n key (str): The key for which to get input.\n value (Any): The current value associated with the key.\n res (dict): The result dictionary where inputs collected will be stored.\n timeout (int): Timeout in seconds for the input.\n\n Returns:\n Any: The input data.\n \"\"\"\n self.logger.typewriter_log(\n f\"Now, ASK For {key}, Origin Input: {value}\",\n Fore.RED,\n f\"\"\n )\n self.logger.typewriter_log(\n f\"Now, you can modify the current field by entering some information, and then press 'Enter' to continue, if you want to keep the original input, please enter '-1' and then press 'Enter':\",\n Fore.GREEN\n )\n temp = inputimeout(prompt=f'You have {timeout} seconds to input:\\n', timeout=timeout)\n if temp == \"-1\":\n return value\n else:\n return temp\n \n def get_input(self, origin_data):\n \"\"\"\n Get input for all fields of the original data from the command line.\n\n Args:\n origin_data (dict): The original data for which to get input.\n\n Returns:\n dict: The dictionary with updated inputs.\n \"\"\"\n self.logger.typewriter_log(\n \"Next, you can start modifying the original input by typing 'Y/y/yes' or skip this step by typing 'N/n/no' and then press 'Enter' to continue the loop:\",\n Fore.RED\n )\n update = inputimeout(prompt=f'You have to make a decision within 60 seconds:\\n', timeout=60)\n res = {\"args\": {}}\n if update in ['y', 'Y', 'yes']:\n execute_time = self.max_wait_seconds\n if isinstance(origin_data, dict):\n args = origin_data.get(\"args\", \"\")\n self.logger.typewriter_log(\n f\"Next, you will have a total of {self.max_wait_seconds} seconds to modify each option:\",\n Fore.RED,\n )\n for key, value in args.items():\n if key == \"done\":\n res[key] = False\n continue\n start_time = time.time()\n res[\"args\"][key] = self.get_each_input(key, value, res, execute_time)\n end_time = time.time()\n execute_time = math.floor(execute_time - (end_time - start_time))\n self.logger.info(f\"modify the input, receive the data: {res}\")\n else:\n res = origin_data\n self.logger.info(\"skip this step\")\n self.logger.info(\"continue the loop\")\n res[\"done\"] = True\n return res\n \n def interrupt(self, input_data):\n \"\"\"\n Interrupts the current input process and returns the current data.\n\n Args:\n input_data (dict): The original input data.\n\n Returns:\n dict: The current data collected so far.\n\n Raises:\n XAgentIOTimeoutError: If the input times out.\n \"\"\"\n try:\n data = self.get_input(input_data)\n return data\n except TimeoutOccurred:\n self.logger.error(f\"Waiting timemout, close connection!\")\n raise XAgentTimeoutError(\"timeout!\")" } ]
import asyncio
import json
import os
import threading
import traceback
import uuid
import sys
from contextlib import contextmanager
from datetime import datetime
from typing import List
from colorama import Fore
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from apscheduler.schedulers.blocking import BlockingScheduler
from XAgentServer.application.core.envs import XAgentServerEnv
from XAgentServer.database.connect import SessionLocal
from XAgentServer.enums.status import StatusEnum
from XAgentServer.exts.exception_ext import XAgentError
from XAgentServer.interaction import XAgentInteraction
from XAgentServer.loggers.logs import Logger
from XAgentServer.models.interaction import InteractionBase
from XAgentServer.models.parameter import InteractionParameter
from XAgentServer.models.raw import XAgentRaw
from XAgentServer.server import XAgentServer
from XAgentServer.application.cruds.interaction import InteractionCRUD
from XAgentServer.application.global_val import redis
from command_input import CommandLineInput
from XAgent.running_recorder import recorder
13964
self.input = None if self.interrupt: self.input = CommandLineInput( do_interrupt=True, max_wait_seconds=self.max_wait_seconds, logger=self.logger) def init_conv_env(self): """initialize the conversation environment, Share the same database resource with webui. If you have initiated a session on the front end but it has not been executed, this ID will be shared. """ user_id = "guest" token = "xagent" description = self.args.description upload_files = self.args.upload_files record_dir = self.args.record_dir agent = self.args.agent goal = self.args.task mode = self.args.mode plan = self.args.plan with get_db() as db: interaction = InteractionCRUD.get_ready_interaction( db=db, user_id=user_id) self.continue_flag = True upload_files = upload_files if upload_files else [] file_list = [] for file in upload_files: file_list.append({ "uuid": file, "name": file }) if interaction is None: base = InteractionBase(interaction_id=self.client_id, user_id=user_id, create_time=datetime.now().strftime("%Y-%m-%d %H:%M:%S"), description=description, agent=agent, mode=mode, file_list=file_list, recorder_root_dir="", status="ready", message="ready...", current_step="-1", update_time=datetime.now().strftime("%Y-%m-%d %H:%M:%S"), call_method="cmd") InteractionCRUD.create_interaction(db=db, base=base) else: self.client_id = interaction.interaction_id parameter = InteractionParameter( interaction_id=self.client_id, parameter_id=uuid.uuid4().hex, args={ "goal": goal, "plan": plan }, ) InteractionCRUD.add_parameter(db=db, parameter=parameter) def run(self): """ Runs the interaction with the XAgentServer with the provided arguments. """ # Create a new raw data to record with get_db() as db: InteractionCRUD.insert_raw(db=db, process=XAgentRaw( interaction_id=self.client_id, node_id=uuid.uuid4().hex, status=StatusEnum.RUNNING, create_time=datetime.now().strftime("%Y-%m-%d %H:%M:%S"), update_time=datetime.now().strftime("%Y-%m-%d %H:%M:%S"), current="", step=-1, data=None, file_list=[], do_interrupt=self.interrupt, wait_seconds=0, ask_for_human_help=False, is_human=True, human_data={"goal": self.args.task, "plan": self.args.plan}, human_file_list=self.args.upload_files, is_send=True, is_receive=False, is_deleted=False )) redis.set_key(f"{self.client_id}_send", 1) parameter = InteractionCRUD.get_init_parameter( db=db, interaction_id=self.client_id) self.task_handler(parameter=parameter) def task_handler(self, parameter: InteractionParameter): """ define a long task to run interaction Args: parameter (InteractionParameter): The parameter of interaction """ try: current_step = uuid.uuid4().hex with get_db() as db: base = InteractionCRUD.get_interaction(db=db, interaction_id=self.client_id) InteractionCRUD.update_interaction_status(db=db, interaction_id=base.interaction_id, status="running", message="running", current_step=current_step) # if mode is not auto, we will interrupt the interaction # and you can change the wait_seconds # default 10 min.
@contextmanager def get_db(): """ Provide a transactional scope around a series of operations. """ session = SessionLocal() try: yield session session.commit() except: session.rollback() raise finally: session.close() class CommandLineParam: """Command line parameters. Attributes: task: Task description. role: Role name (default is "Assistant"). plan: List of steps to perform (default is empty list). upload_files: List of files to upload (default is empty list). download_files: List of files to download (default is empty list). record_dir: Directory to store records (default is `None`). mode: Run mode. Can be "auto" (default is "auto"). max_wait_seconds: Maximum wait time in seconds (default is 600). description: Description of the interaction (default is "XAgent-Test"). agent: Agent name (default is "XAgent"). """ def __init__(self, task, role="Assistant", plan=[], upload_files: List[str] = [], download_files: List[str] = [], record_dir: str = None, mode: str = "auto", max_wait_seconds: int = 600, description: str = "XAgent-Test", agent: str = "XAgent", ): self.task = task self.plan = plan self.role = role self.upload_files = upload_files self.download_files = download_files self.record_dir = record_dir # auto is supported only in cmd self.mode = "auto" self.max_wait_seconds = max_wait_seconds self.description = description self.agent = agent class CommandLine(): """ A command-line interface for interacting with XAgentServer. Attributes: env: An instance of the XAgentServer environment. client_id: A unique identifier for the client, generated as a hexadecimal UUID. date_str: The current date as a string in YYYY-MM-DD format. log_dir: The directory where the logs are stored. logger: An instance of the Logger used for logging interactions. interactionDB: A database interface for interacting with either a persistent database (SQLite, MySQL, PostgreSQL) or a local storage file, depending on the configuration of `env`. """ def __init__(self, args: CommandLineParam = None): """ Initialize the CommandLine instance. Args: args (CommandLineParam) : parameters. task is required, mode options: ["auto"] """ self.args = args self.client_id = uuid.uuid4().hex self.date_str = datetime.now().strftime("%Y-%m-%d") self.log_dir = os.path.join(os.path.join(XAgentServerEnv.base_dir, "localstorage", "interact_records"), self.date_str, self.client_id) if not os.path.exists(self.log_dir): os.makedirs(self.log_dir) self.logger = Logger(log_dir=self.log_dir, log_file=f"interact.log") self.logger.typewriter_log( title=f"XAgentServer is running on cmd mode", title_color=Fore.RED) self.logger.info(title=f"XAgentServer log:", title_color=Fore.RED, message=f"{self.log_dir}") self.interrupt = self.args.mode != "auto" self.init_conv_env() self.max_wait_seconds = self.args.max_wait_seconds self.scheduler = AsyncIOScheduler() self.input = None if self.interrupt: self.input = CommandLineInput( do_interrupt=True, max_wait_seconds=self.max_wait_seconds, logger=self.logger) def init_conv_env(self): """initialize the conversation environment, Share the same database resource with webui. If you have initiated a session on the front end but it has not been executed, this ID will be shared. 
""" user_id = "guest" token = "xagent" description = self.args.description upload_files = self.args.upload_files record_dir = self.args.record_dir agent = self.args.agent goal = self.args.task mode = self.args.mode plan = self.args.plan with get_db() as db: interaction = InteractionCRUD.get_ready_interaction( db=db, user_id=user_id) self.continue_flag = True upload_files = upload_files if upload_files else [] file_list = [] for file in upload_files: file_list.append({ "uuid": file, "name": file }) if interaction is None: base = InteractionBase(interaction_id=self.client_id, user_id=user_id, create_time=datetime.now().strftime("%Y-%m-%d %H:%M:%S"), description=description, agent=agent, mode=mode, file_list=file_list, recorder_root_dir="", status="ready", message="ready...", current_step="-1", update_time=datetime.now().strftime("%Y-%m-%d %H:%M:%S"), call_method="cmd") InteractionCRUD.create_interaction(db=db, base=base) else: self.client_id = interaction.interaction_id parameter = InteractionParameter( interaction_id=self.client_id, parameter_id=uuid.uuid4().hex, args={ "goal": goal, "plan": plan }, ) InteractionCRUD.add_parameter(db=db, parameter=parameter) def run(self): """ Runs the interaction with the XAgentServer with the provided arguments. """ # Create a new raw data to record with get_db() as db: InteractionCRUD.insert_raw(db=db, process=XAgentRaw( interaction_id=self.client_id, node_id=uuid.uuid4().hex, status=StatusEnum.RUNNING, create_time=datetime.now().strftime("%Y-%m-%d %H:%M:%S"), update_time=datetime.now().strftime("%Y-%m-%d %H:%M:%S"), current="", step=-1, data=None, file_list=[], do_interrupt=self.interrupt, wait_seconds=0, ask_for_human_help=False, is_human=True, human_data={"goal": self.args.task, "plan": self.args.plan}, human_file_list=self.args.upload_files, is_send=True, is_receive=False, is_deleted=False )) redis.set_key(f"{self.client_id}_send", 1) parameter = InteractionCRUD.get_init_parameter( db=db, interaction_id=self.client_id) self.task_handler(parameter=parameter) def task_handler(self, parameter: InteractionParameter): """ define a long task to run interaction Args: parameter (InteractionParameter): The parameter of interaction """ try: current_step = uuid.uuid4().hex with get_db() as db: base = InteractionCRUD.get_interaction(db=db, interaction_id=self.client_id) InteractionCRUD.update_interaction_status(db=db, interaction_id=base.interaction_id, status="running", message="running", current_step=current_step) # if mode is not auto, we will interrupt the interaction # and you can change the wait_seconds # default 10 min.
interaction = XAgentInteraction(
4
2023-10-16 03:44:57+00:00
16k
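As a minimal sketch only: assuming the fields used below (repo_name, file_path, context, import_statement, cropped_code, next_line) mirror the layout of the records in this dump, a record such as the one ending above could be assembled into a completion prompt and scored against its gold next line roughly as follows. The build_prompt and exact_match helpers and the placeholder record are hypothetical illustrations, not part of the dataset.

def build_prompt(record: dict) -> str:
    """Concatenate retrieved context snippets, the import block, and the cropped code prefix."""
    context_snippets = "\n\n".join(
        item.get("snippet", "") for item in record.get("context", [])
    )
    return "\n\n".join(
        [
            f"# repo: {record['repo_name']}",
            f"# file: {record['file_path']}",
            context_snippets,
            record["import_statement"],
            record["cropped_code"],
        ]
    )


def exact_match(prediction: str, record: dict) -> bool:
    """Compare a model's first predicted line against the gold next_line."""
    return prediction.strip() == record["next_line"].strip()


if __name__ == "__main__":
    # A tiny stand-in record; values are placeholders, not dataset content.
    record = {
        "repo_name": "example/repo",
        "file_path": "pkg/module.py",
        "context": [
            {"identifier": "helper", "snippet": "def helper():\n    return 1"}
        ],
        "import_statement": "import os",
        "cropped_code": "def main():\n    value = helper()\n",
        "next_line": "    print(value)",
    }
    print(build_prompt(record))
    print(exact_match("    print(value)", record))  # -> True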
deepseek-ai/DreamCraft3D
extern/ldm_zero123/models/diffusion/ddpm.py
[ { "identifier": "AutoencoderKL", "path": "extern/ldm_zero123/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(\n self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ):\n super().__init__()\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2 * ddconfig[\"z_channels\"], 2 * embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels) == int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(\n inputs,\n reconstructions,\n posterior,\n optimizer_idx,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"train\",\n )\n self.log(\n \"aeloss\",\n aeloss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True,\n )\n self.log_dict(\n log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False\n )\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(\n inputs,\n reconstructions,\n posterior,\n optimizer_idx,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"train\",\n )\n\n self.log(\n \"discloss\",\n discloss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True,\n )\n self.log_dict(\n log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False\n )\n return discloss\n\n def validation_step(self, batch, batch_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(\n inputs,\n reconstructions,\n posterior,\n 0,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"val\",\n )\n\n discloss, log_dict_disc = self.loss(\n inputs,\n reconstructions,\n posterior,\n 1,\n self.global_step,\n last_layer=self.get_last_layer(),\n split=\"val\",\n )\n\n self.log(\"val/rec_loss\", 
log_dict_ae[\"val/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def configure_optimizers(self):\n lr = self.learning_rate\n opt_ae = torch.optim.Adam(\n list(self.encoder.parameters())\n + list(self.decoder.parameters())\n + list(self.quant_conv.parameters())\n + list(self.post_quant_conv.parameters()),\n lr=lr,\n betas=(0.5, 0.9),\n )\n opt_disc = torch.optim.Adam(\n self.loss.discriminator.parameters(), lr=lr, betas=(0.5, 0.9)\n )\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.0 * (x - x.min()) / (x.max() - x.min()) - 1.0\n return x" }, { "identifier": "IdentityFirstStage", "path": "extern/ldm_zero123/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "VQModelInterface", "path": "extern/ldm_zero123/models/autoencoder.py", "snippet": "class VQModelInterface(VQModel):\n def __init__(self, embed_dim, *args, **kwargs):\n super().__init__(embed_dim=embed_dim, *args, **kwargs)\n self.embed_dim = embed_dim\n\n def encode(self, x):\n h = self.encoder(x)\n h = self.quant_conv(h)\n return h\n\n def decode(self, h, force_not_quantize=False):\n # also go through quantization layer\n if not force_not_quantize:\n quant, emb_loss, info = self.quantize(h)\n else:\n quant = h\n quant = self.post_quant_conv(quant)\n dec = self.decoder(quant)\n return dec" }, { "identifier": "DDIMSampler", "path": "extern/ldm_zero123/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def to(self, device):\n \"\"\"Same as to in torch module\n Don't really underestand why this isn't a module in the first place\"\"\"\n for k, v in self.__dict__.items():\n if isinstance(v, torch.Tensor):\n new_v = getattr(self, k).to(device)\n setattr(self, k, new_v)\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(\n self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0.0, verbose=True\n ):\n self.ddim_timesteps = make_ddim_timesteps(\n ddim_discr_method=ddim_discretize,\n 
num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,\n verbose=verbose,\n )\n alphas_cumprod = self.model.alphas_cumprod\n assert (\n alphas_cumprod.shape[0] == self.ddpm_num_timesteps\n ), \"alphas have to be defined for each timestep\"\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer(\"betas\", to_torch(self.model.betas))\n self.register_buffer(\"alphas_cumprod\", to_torch(alphas_cumprod))\n self.register_buffer(\n \"alphas_cumprod_prev\", to_torch(self.model.alphas_cumprod_prev)\n )\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer(\n \"sqrt_alphas_cumprod\", to_torch(np.sqrt(alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_one_minus_alphas_cumprod\",\n to_torch(np.sqrt(1.0 - alphas_cumprod.cpu())),\n )\n self.register_buffer(\n \"log_one_minus_alphas_cumprod\", to_torch(np.log(1.0 - alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_recip_alphas_cumprod\", to_torch(np.sqrt(1.0 / alphas_cumprod.cpu()))\n )\n self.register_buffer(\n \"sqrt_recipm1_alphas_cumprod\",\n to_torch(np.sqrt(1.0 / alphas_cumprod.cpu() - 1)),\n )\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(\n alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,\n verbose=verbose,\n )\n self.register_buffer(\"ddim_sigmas\", ddim_sigmas)\n self.register_buffer(\"ddim_alphas\", ddim_alphas)\n self.register_buffer(\"ddim_alphas_prev\", ddim_alphas_prev)\n self.register_buffer(\"ddim_sqrt_one_minus_alphas\", np.sqrt(1.0 - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev)\n / (1 - self.alphas_cumprod)\n * (1 - self.alphas_cumprod / self.alphas_cumprod_prev)\n )\n self.register_buffer(\n \"ddim_sigmas_for_original_num_steps\", sigmas_for_original_sampling_steps\n )\n\n @torch.no_grad()\n def sample(\n self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.0,\n mask=None,\n x0=None,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n **kwargs,\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list):\n ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(\n f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\"\n )\n\n else:\n if conditioning.shape[0] != batch_size:\n print(\n f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\"\n )\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n # print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(\n conditioning,\n size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask,\n x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(\n self,\n cond,\n shape,\n x_T=None,\n ddim_use_original_steps=False,\n callback=None,\n timesteps=None,\n quantize_denoised=False,\n mask=None,\n x0=None,\n img_callback=None,\n log_every_t=100,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n dynamic_threshold=None,\n t_start=-1,\n ):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = (\n self.ddpm_num_timesteps\n if ddim_use_original_steps\n else self.ddim_timesteps\n )\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = (\n int(\n min(timesteps / self.ddim_timesteps.shape[0], 1)\n * self.ddim_timesteps.shape[0]\n )\n - 1\n )\n timesteps = self.ddim_timesteps[:subset_end]\n\n timesteps = timesteps[:t_start]\n\n intermediates = {\"x_inter\": [img], \"pred_x0\": [img]}\n time_range = (\n reversed(range(0, timesteps))\n if ddim_use_original_steps\n else np.flip(timesteps)\n )\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc=\"DDIM Sampler\", total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(\n x0, ts\n ) # TODO: deterministic forward pass?\n img = img_orig * mask + (1.0 - mask) * img\n\n outs = self.p_sample_ddim(\n img,\n cond,\n ts,\n index=index,\n use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised,\n temperature=temperature,\n noise_dropout=noise_dropout,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n )\n img, pred_x0 = outs\n if callback:\n img = callback(i, img, pred_x0)\n if img_callback:\n img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index 
== total_steps - 1:\n intermediates[\"x_inter\"].append(img)\n intermediates[\"pred_x0\"].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(\n self,\n x,\n c,\n t,\n index,\n repeat_noise=False,\n use_original_steps=False,\n quantize_denoised=False,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n dynamic_threshold=None,\n ):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.0:\n e_t = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [\n torch.cat([unconditional_conditioning[k][i], c[k][i]])\n for i in range(len(c[k]))\n ]\n else:\n c_in[k] = torch.cat([unconditional_conditioning[k], c[k]])\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(\n self.model, e_t, x, t, c, **corrector_kwargs\n )\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = (\n self.model.alphas_cumprod_prev\n if use_original_steps\n else self.ddim_alphas_prev\n )\n sqrt_one_minus_alphas = (\n self.model.sqrt_one_minus_alphas_cumprod\n if use_original_steps\n else self.ddim_sqrt_one_minus_alphas\n )\n sigmas = (\n self.model.ddim_sigmas_for_original_num_steps\n if use_original_steps\n else self.ddim_sigmas\n )\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full(\n (b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device\n )\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n\n print(t, sqrt_one_minus_at, a_t)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n pred_x0 = norm_thresholding(pred_x0, dynamic_threshold)\n\n # direction pointing to x_t\n dir_xt = (1.0 - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.0:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(\n self,\n x0,\n c,\n t_enc,\n use_original_steps=False,\n return_intermediates=None,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n ):\n num_reference_steps = (\n self.ddpm_num_timesteps\n if use_original_steps\n else self.ddim_timesteps.shape[0]\n )\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc=\"Encoding 
Image\"):\n t = torch.full(\n (x0.shape[0],), i, device=self.model.device, dtype=torch.long\n )\n if unconditional_guidance_scale == 1.0:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(\n torch.cat((x_next, x_next)),\n torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c)),\n ),\n 2,\n )\n noise_pred = e_t_uncond + unconditional_guidance_scale * (\n noise_pred - e_t_uncond\n )\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = (\n alphas_next[i].sqrt()\n * ((1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt())\n * noise_pred\n )\n x_next = xt_weighted + weighted_noise_pred\n if (\n return_intermediates\n and i % (num_steps // return_intermediates) == 0\n and i < num_steps - 1\n ):\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n\n out = {\"x_encoded\": x_next, \"intermediate_steps\": inter_steps}\n if return_intermediates:\n out.update({\"intermediates\": intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (\n extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0\n + extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise\n )\n\n @torch.no_grad()\n def decode(\n self,\n x_latent,\n cond,\n t_start,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n use_original_steps=False,\n ):\n timesteps = (\n np.arange(self.ddpm_num_timesteps)\n if use_original_steps\n else self.ddim_timesteps\n )\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc=\"Decoding image\", total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full(\n (x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long\n )\n x_dec, _ = self.p_sample_ddim(\n x_dec,\n cond,\n ts,\n index=index,\n use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n )\n return x_dec" }, { "identifier": "CrossAttention", "path": "extern/ldm_zero123/modules/attention.py", "snippet": "class CrossAttention(nn.Module):\n def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0):\n super().__init__()\n inner_dim = dim_head * heads\n context_dim = default(context_dim, query_dim)\n\n self.scale = dim_head**-0.5\n self.heads = heads\n\n self.to_q = nn.Linear(query_dim, inner_dim, bias=False)\n self.to_k = nn.Linear(context_dim, inner_dim, bias=False)\n self.to_v = nn.Linear(context_dim, inner_dim, bias=False)\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, query_dim), nn.Dropout(dropout)\n )\n\n self.lora = False\n self.query_dim = query_dim\n 
self.inner_dim = inner_dim\n self.context_dim = context_dim\n\n def setup_lora(self, rank=4, network_alpha=None):\n self.lora = True\n self.rank = rank\n self.to_q_lora = LoRALinearLayer(self.query_dim, self.inner_dim, rank, network_alpha)\n self.to_k_lora = LoRALinearLayer(self.context_dim, self.inner_dim, rank, network_alpha)\n self.to_v_lora = LoRALinearLayer(self.context_dim, self.inner_dim, rank, network_alpha)\n self.to_out_lora = LoRALinearLayer(self.inner_dim, self.query_dim, rank, network_alpha)\n self.lora_layers = nn.ModuleList()\n self.lora_layers.append(self.to_q_lora)\n self.lora_layers.append(self.to_k_lora)\n self.lora_layers.append(self.to_v_lora)\n self.lora_layers.append(self.to_out_lora)\n\n def forward(self, x, context=None, mask=None):\n h = self.heads\n\n q = self.to_q(x)\n context = default(context, x)\n k = self.to_k(context)\n v = self.to_v(context)\n\n if self.lora:\n q += self.to_q_lora(x)\n k += self.to_k_lora(context)\n v += self.to_v_lora(context)\n\n q, k, v = map(lambda t: rearrange(t, \"b n (h d) -> (b h) n d\", h=h), (q, k, v))\n\n sim = einsum(\"b i d, b j d -> b i j\", q, k) * self.scale\n\n if exists(mask):\n mask = rearrange(mask, \"b ... -> b (...)\")\n max_neg_value = -torch.finfo(sim.dtype).max\n mask = repeat(mask, \"b j -> (b h) () j\", h=h)\n sim.masked_fill_(~mask, max_neg_value)\n\n # attention, what we cannot get enough of\n attn = sim.softmax(dim=-1)\n\n out = einsum(\"b i j, b j d -> b i d\", attn, v)\n out = rearrange(out, \"(b h) n d -> b n (h d)\", h=h)\n # return self.to_out(out)\n\n # linear proj\n o = self.to_out[0](out)\n if self.lora:\n o += self.to_out_lora(out)\n # dropout\n out = self.to_out[1](o)\n\n return out" }, { "identifier": "extract_into_tensor", "path": "extern/ldm_zero123/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "make_beta_schedule", "path": "extern/ldm_zero123/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(\n schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3\n):\n if schedule == \"linear\":\n betas = (\n torch.linspace(\n linear_start**0.5, linear_end**0.5, n_timestep, dtype=torch.float64\n )\n ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(\n linear_start, linear_end, n_timestep, dtype=torch.float64\n )\n elif schedule == \"sqrt\":\n betas = (\n torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n ** 0.5\n )\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "noise_like", "path": "extern/ldm_zero123/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(\n shape[0], *((1,) * (len(shape) - 1))\n )\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DiagonalGaussianDistribution", "path": "extern/ldm_zero123/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def 
__init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(\n device=self.parameters.device\n )\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(\n device=self.parameters.device\n )\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.0])\n else:\n if other is None:\n return 0.5 * torch.sum(\n torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3],\n )\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var\n - 1.0\n - self.logvar\n + other.logvar,\n dim=[1, 2, 3],\n )\n\n def nll(self, sample, dims=[1, 2, 3]):\n if self.deterministic:\n return torch.Tensor([0.0])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims,\n )\n\n def mode(self):\n return self.mean" }, { "identifier": "normal_kl", "path": "extern/ldm_zero123/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. 
Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "LitEma", "path": "extern/ldm_zero123/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError(\"Decay must be between 0 and 1\")\n\n self.m_name2s_name = {}\n self.register_buffer(\"decay\", torch.tensor(decay, dtype=torch.float32))\n self.register_buffer(\n \"num_updates\",\n torch.tensor(0, dtype=torch.int)\n if use_num_upates\n else torch.tensor(-1, dtype=torch.int),\n )\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n # remove as '.'-character is not allowed in buffers\n s_name = name.replace(\".\", \"\")\n self.m_name2s_name.update({name: s_name})\n self.register_buffer(s_name, p.clone().detach().data)\n\n self.collected_params = []\n\n def forward(self, model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(\n one_minus_decay * (shadow_params[sname] - m_param[key])\n )\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. 
After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "count_params", "path": "extern/ldm_zero123/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "default", "path": "extern/ldm_zero123/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "exists", "path": "extern/ldm_zero123/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "instantiate_from_config", "path": "extern/ldm_zero123/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == \"__is_first_stage__\":\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "isimage", "path": "extern/ldm_zero123/util.py", "snippet": "def isimage(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "ismap", "path": "extern/ldm_zero123/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "log_txt_as_img", "path": "extern/ldm_zero123/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype(\"data/DejaVuSans.ttf\", size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(\n xc[bi][start : start + nc] for start in range(0, len(xc[bi]), nc)\n )\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "mean_flat", "path": "extern/ldm_zero123/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" } ]
import itertools
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn as nn
from contextlib import contextmanager, nullcontext
from functools import partial
from einops import rearrange, repeat
from omegaconf import ListConfig
from pytorch_lightning.utilities.rank_zero import rank_zero_only
from torch.optim.lr_scheduler import LambdaLR
from torchvision.utils import make_grid
from tqdm import tqdm
from extern.ldm_zero123.models.autoencoder import (
    AutoencoderKL,
    IdentityFirstStage,
    VQModelInterface,
)
from extern.ldm_zero123.models.diffusion.ddim import DDIMSampler
from extern.ldm_zero123.modules.attention import CrossAttention
from extern.ldm_zero123.modules.diffusionmodules.util import (
    extract_into_tensor,
    make_beta_schedule,
    noise_like,
)
from extern.ldm_zero123.modules.distributions.distributions import (
    DiagonalGaussianDistribution,
    normal_kl,
)
from extern.ldm_zero123.modules.ema import LitEma
from extern.ldm_zero123.util import (
    count_params,
    default,
    exists,
    instantiate_from_config,
    isimage,
    ismap,
    log_txt_as_img,
    mean_flat,
)
12321
else list(map(lambda x: x[:batch_size], cond[key])) for key in cond } else: cond = ( [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] ) return self.p_sample_loop( cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0, ) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim: ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates = ddim_sampler.sample( ddim_steps, batch_size, shape, cond, verbose=False, **kwargs ) else: samples, intermediates = self.sample( cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs ) return samples, intermediates @torch.no_grad() def get_unconditional_conditioning( self, batch_size, null_label=None, image_size=512 ): if null_label is not None: xc = null_label if isinstance(xc, ListConfig): xc = list(xc) if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: if hasattr(xc, "to"): xc = xc.to(self.device) c = self.get_learned_conditioning(xc) else: # todo: get null label from cond_stage_model raise NotImplementedError() c = repeat(c, "1 ... -> b ...", b=batch_size).to(self.device) cond = {} cond["c_crossattn"] = [c] cond["c_concat"] = [ torch.zeros([batch_size, 4, image_size // 8, image_size // 8]).to( self.device ) ] return cond @torch.no_grad() def log_images( self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1.0, return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, unconditional_guidance_scale=1.0, unconditional_guidance_label=None, use_ema_scope=True, **kwargs, ): ema_scope = self.ema_scope if use_ema_scope else nullcontext use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input( batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N, ) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption", "txt"]: xc = log_txt_as_img( (x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25, ) log["conditioning"] = xc elif self.cond_stage_key == "class_label": xc = log_txt_as_img( (x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25, ) log["conditioning"] = xc elif isimage(xc): log["conditioning"] = xc
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {"concat": "c_concat", "crossattn": "c_crossattn", "adm": "y"} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__( self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0.0, v_posterior=0.0, # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1.0, conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0.0, make_it_fit=False, ucg_training=None, ): super().__init__() assert parameterization in [ "eps", "x0", ], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print( f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode" ) self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? 
self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if ckpt_path is not None: self.init_from_ckpt( ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet ) self.register_schedule( given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, ): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule( beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) alphas = 1.0 - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1]) (timesteps,) = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert ( alphas_cumprod.shape[0] == self.num_timesteps ), "alphas have to be defined for each timestep" to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer("betas", to_torch(betas)) self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod)) self.register_buffer("alphas_cumprod_prev", to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer("sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod))) self.register_buffer( "sqrt_one_minus_alphas_cumprod", to_torch(np.sqrt(1.0 - alphas_cumprod)) ) self.register_buffer( "log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod)) ) self.register_buffer( "sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod)) ) self.register_buffer( "sqrt_recipm1_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod - 1)) ) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * ( 1.0 - alphas_cumprod_prev ) / (1.0 - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. 
- alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer("posterior_variance", to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer( "posterior_log_variance_clipped", to_torch(np.log(np.maximum(posterior_variance, 1e-20))), ) self.register_buffer( "posterior_mean_coef1", to_torch(betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod)), ) self.register_buffer( "posterior_mean_coef2", to_torch( (1.0 - alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - alphas_cumprod) ), ) if self.parameterization == "eps": lvlb_weights = self.betas**2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod) ) elif self.parameterization == "x0": lvlb_weights = ( 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2.0 * 1 - torch.Tensor(alphas_cumprod)) ) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer("lvlb_weights", lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) if self.make_it_fit: n_params = len( [ name for name, _ in itertools.chain( self.named_parameters(), self.named_buffers() ) ] ) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params, ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[ i % old_shape[0], j % old_shape[1] ] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = ( self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(sd, strict=False) ) print( f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys" ) if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. 
:return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor( self.log_one_minus_alphas_cumprod, t, x_start.shape ) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor( self.posterior_log_variance_clipped, t, x_t.shape ) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1.0, 1.0) model_mean, posterior_variance, posterior_log_variance = self.q_posterior( x_start=x_recon, x_t=x, t=t ) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance( x=x, t=t, clip_denoised=clip_denoised ) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm( reversed(range(0, self.num_timesteps)), desc="Sampling t", total=self.num_timesteps, ): img = self.p_sample( img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised, ) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop( (batch_size, channels, image_size, image_size), return_intermediates=return_intermediates, ) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise ) def get_loss(self, pred, target, mean=True): if self.loss_type == "l1": loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == "l2": if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction="none") else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = 
self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start else: raise NotImplementedError( f"Paramterization {self.parameterization} not yet supported" ) loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = "train" if self.training else "val" loss_dict.update({f"{log_prefix}/loss_simple": loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f"{log_prefix}/loss_vlb": loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f"{log_prefix}/loss": loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint( 0, self.num_timesteps, (x.shape[0],), device=self.device ).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, "b h w c -> b c h w") x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): for k in self.ucg_training: p = self.ucg_training[k]["p"] val = self.ucg_training[k]["val"] if val is None: val = "" for i in range(len(batch[k])): if self.ucg_prng.choice(2, p=[1 - p, p]): batch[k][i] = val loss, loss_dict = self.shared_step(batch) self.log_dict( loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True ) self.log( "global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False, ) if self.use_scheduler: lr = self.optimizers().param_groups[0]["lr"] self.log( "lr_abs", lr, prog_bar=True, logger=True, on_step=True, on_epoch=False ) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + "_ema": loss_dict_ema[key] for key in loss_dict_ema} self.log_dict( loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True ) self.log_dict( loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True ) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, "n b c h w -> b n c h w") denoise_grid = rearrange(denoise_grid, "b n c h w -> (b n) c h w") denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), "1 -> b", b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = 
self.sample( batch_size=N, return_intermediates=True ) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__( self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, unet_trainable=True, *args, **kwargs, ): self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs["timesteps"] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = "concat" if concat_mode else "crossattn" if cond_stage_config == "__is_unconditional__": conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.unet_trainable = unet_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer("scale_factor", torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward # construct linear projection layer for concatenating image CLIP embedding and RT self.cc_projection = nn.Linear(772, 768) nn.init.eye_(list(self.cc_projection.parameters())[0][:768, :768]) nn.init.zeros_(list(self.cc_projection.parameters())[1]) self.cc_projection.requires_grad_(True) self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True def make_cond_schedule( self, ): self.cond_ids = torch.full( size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long, ) ids = torch.round( torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond) ).long() self.cond_ids[: self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if ( self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt ): assert ( self.scale_factor == 1.0 ), "rather not use custom rescaling and std-rescaling simultaneously" # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer("scale_factor", 1.0 / z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, 
linear_end=2e-2, cosine_s=8e-3, ): super().register_schedule( given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s ) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != "__is_first_stage__" assert config != "__is_unconditional__" model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list( self, samples, desc="", force_no_decoder_quantization=False ): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append( self.decode_first_stage( zd.to(self.device), force_not_quantize=force_no_decoder_quantization ) ) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, "n b c h w -> b n c h w") denoise_grid = rearrange(denoise_grid, "b n c h w -> (b n) c h w") denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError( f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented" ) return self.scale_factor * z def get_learned_conditioning(self, c): if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, "encode") and callable( self.cond_stage_model.encode ): c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min( torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1 )[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, device): weighting = self.delta_border(h, w) weighting = torch.clip( weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 
1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip( L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"], ) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold( self, x, kernel_size, stride, uf=1, df=1 ): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride ) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting( kernel_size[0], kernel_size[1], Ly, Lx, x.device ).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride ) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict( kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf), ) fold = torch.nn.Fold( output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2 ) weighting = self.get_weighting( kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device ).to(x.dtype) normalization = fold(weighting).view( 1, 1, h * uf, w * uf ) # normalizes the overlap weighting = weighting.view( (1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx) ) elif df > 1 and uf == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride ) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict( kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df), ) fold = torch.nn.Fold( output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2 ) weighting = self.get_weighting( kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device ).to(x.dtype) normalization = fold(weighting).view( 1, 1, h // df, w // df ) # normalizes the overlap weighting = weighting.view( (1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx) ) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input( self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None, uncond=0.05, ): x = super().get_input(batch, k) T = batch["T"].to(memory_format=torch.contiguous_format).float() if bs is not None: x = x[:bs] T = T[:bs].to(self.device) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() cond_key = cond_key or self.cond_stage_key xc = super().get_input(batch, cond_key).to(self.device) if bs is not None: xc = xc[:bs] cond = {} # To support classifier-free guidance, randomly drop out only text conditioning 5%, only image conditioning 5%, and both 5%. 
random = torch.rand(x.size(0), device=x.device) prompt_mask = rearrange(random < 2 * uncond, "n -> n 1 1") input_mask = 1 - rearrange( (random >= uncond).float() * (random < 3 * uncond).float(), "n -> n 1 1 1" ) null_prompt = self.get_learned_conditioning([""]) # z.shape: [8, 4, 64, 64]; c.shape: [8, 1, 768] # print('=========== xc shape ===========', xc.shape) with torch.enable_grad(): clip_emb = self.get_learned_conditioning(xc).detach() null_prompt = self.get_learned_conditioning([""]).detach() cond["c_crossattn"] = [ self.cc_projection( torch.cat( [ torch.where(prompt_mask, null_prompt, clip_emb), T[:, None, :], ], dim=-1, ) ) ] cond["c_concat"] = [ input_mask * self.encode_first_stage((xc.to(self.device))).mode().detach() ] out = [z, cond] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_original_cond: out.append(xc) return out # @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, "b h w c -> b c h w").contiguous() z = 1.0 / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold( z, ks, stride, uf=uf ) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view( (z.shape[0], -1, ks[0], ks[1], z.shape[-1]) ) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim if isinstance(self.first_stage_model, VQModelInterface): output_list = [ self.first_stage_model.decode( z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize, ) for i in range(z.shape[-1]) ] else: output_list = [ self.first_stage_model.decode(z[:, :, :, :, i]) for i in range(z.shape[-1]) ] o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) o = o * weighting # Reverse 1. reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization # norm is shape (1, 1, h, w) return decoded else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode( z, force_not_quantize=predict_cids or force_not_quantize ) else: return self.first_stage_model.decode(z) else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode( z, force_not_quantize=predict_cids or force_not_quantize ) else: return self.first_stage_model.decode(z) # @torch.no_grad() # wasted two hours to find this bug... why no grad here! def encode_first_stage(self, x): if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. 
(64, 64) df = self.split_input_params["vqf"] self.split_input_params["original_image_size"] = x.shape[-2:] bs, nc, h, w = x.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold( x, ks, stride, df=df ) z = unfold(x) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view( (z.shape[0], -1, ks[0], ks[1], z.shape[-1]) ) # (bn, nc, ks[0], ks[1], L ) output_list = [ self.first_stage_model.encode(z[:, :, :, :, i]) for i in range(z.shape[-1]) ] o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization return decoded else: return self.first_stage_model.encode(x) else: return self.first_stage_model.encode(x) def shared_step(self, batch, **kwargs): x, c = self.get_input(batch, self.first_stage_key) loss = self(x, c) return loss def forward(self, x, c, *args, **kwargs): t = torch.randint( 0, self.num_timesteps, (x.shape[0],), device=self.device ).long() if self.model.conditioning_key is not None: assert c is not None # if self.cond_stage_trainable: # c = self.get_learned_conditioning(c) if self.shorten_cond_schedule: # TODO: drop this option tc = self.cond_ids[t].to(self.device) c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset def rescale_bbox(bbox): x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2]) y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3]) w = min(bbox[2] / crop_coordinates[2], 1 - x0) h = min(bbox[3] / crop_coordinates[3], 1 - y0) return x0, y0, w, h return [rescale_bbox(b) for b in bboxes] def apply_model(self, x_noisy, t, cond, return_ids=False): if isinstance(cond, dict): # hybrid case, cond is exptected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = ( "c_concat" if self.model.conditioning_key == "concat" else "c_crossattn" ) cond = {key: cond} if hasattr(self, "split_input_params"): assert len(cond) == 1 # todo can only deal with one conditioning atm assert not return_ids ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. 
(64, 64) h, w = x_noisy.shape[-2:] fold, unfold, normalization, weighting = self.get_fold_unfold( x_noisy, ks, stride ) z = unfold(x_noisy) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view( (z.shape[0], -1, ks[0], ks[1], z.shape[-1]) ) # (bn, nc, ks[0], ks[1], L ) z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])] if ( self.cond_stage_key in ["image", "LR_image", "segmentation", "bbox_img"] and self.model.conditioning_key ): # todo check for completeness c_key = next(iter(cond.keys())) # get key c = next(iter(cond.values())) # get value assert len(c) == 1 # todo extend to list with more than one elem c = c[0] # get element c = unfold(c) c = c.view( (c.shape[0], -1, ks[0], ks[1], c.shape[-1]) ) # (bn, nc, ks[0], ks[1], L ) cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])] elif self.cond_stage_key == "coordinates_bbox": assert ( "original_image_size" in self.split_input_params ), "BoudingBoxRescaling is missing original_image_size" # assuming padding of unfold is always 0 and its dilation is always 1 n_patches_per_row = int((w - ks[0]) / stride[0] + 1) full_img_h, full_img_w = self.split_input_params["original_image_size"] # as we are operating on latents, we need the factor from the original image size to the # spatial latent size to properly rescale the crops for regenerating the bbox annotations num_downs = self.first_stage_model.encoder.num_resolutions - 1 rescale_latent = 2 ** (num_downs) # get top left postions of patches as conforming for the bbbox tokenizer, therefore we # need to rescale the tl patch coordinates to be in between (0,1) tl_patch_coordinates = [ ( rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w, rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h, ) for patch_nr in range(z.shape[-1]) ] # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w) patch_limits = [ ( x_tl, y_tl, rescale_latent * ks[0] / full_img_w, rescale_latent * ks[1] / full_img_h, ) for x_tl, y_tl in tl_patch_coordinates ] # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates] # tokenize crop coordinates for the bounding boxes of the respective patches patch_limits_tknzd = [ torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to( self.device ) for bbox in patch_limits ] # list of length l with tensors of shape (1, 2) # cut tknzd crop position from conditioning assert isinstance(cond, dict), "cond must be dict to be fed into model" cut_cond = cond["c_crossattn"][0][..., :-2].to(self.device) adapted_cond = torch.stack( [torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd] ) adapted_cond = rearrange(adapted_cond, "l b n -> (l b) n") adapted_cond = self.get_learned_conditioning(adapted_cond) adapted_cond = rearrange( adapted_cond, "(l b) n d -> l b n d", l=z.shape[-1] ) cond_list = [{"c_crossattn": [e]} for e in adapted_cond] else: cond_list = [ cond for i in range(z.shape[-1]) ] # Todo make this more efficient # apply model by loop over crops output_list = [ self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1]) ] assert not isinstance( output_list[0], tuple ) # todo cant deal with multiple model outputs check this never happens o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together x_recon = fold(o) / normalization else: x_recon = self.model(x_noisy, t, **cond) if 
isinstance(x_recon, tuple) and not return_ids: return x_recon[0] else: return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart ) / extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. """ batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl( mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0 ) return mean_flat(kl_prior) / np.log(2.0) def p_losses(self, x_start, cond, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_output = self.apply_model(x_noisy, t, cond) loss_dict = {} prefix = "train" if self.training else "val" if self.parameterization == "x0": target = x_start elif self.parameterization == "eps": target = noise else: raise NotImplementedError() loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) loss_dict.update({f"{prefix}/loss_simple": loss_simple.mean()}) logvar_t = self.logvar[t].to(self.device) loss = loss_simple / torch.exp(logvar_t) + logvar_t # loss = loss_simple / torch.exp(self.logvar) + self.logvar if self.learn_logvar: loss_dict.update({f"{prefix}/loss_gamma": loss.mean()}) loss_dict.update({"logvar": self.logvar.data.mean()}) loss = self.l_simple_weight * loss.mean() loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() loss_dict.update({f"{prefix}/loss_vlb": loss_vlb}) loss += self.original_elbo_weight * loss_vlb loss_dict.update({f"{prefix}/loss": loss}) return loss, loss_dict def p_mean_variance( self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, return_x0=False, score_corrector=None, corrector_kwargs=None, ): t_in = t model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) if score_corrector is not None: assert self.parameterization == "eps" model_out = score_corrector.modify_score( self, model_out, x, t, c, **corrector_kwargs ) if return_codebook_ids: model_out, logits = model_out if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out else: raise NotImplementedError() if clip_denoised: x_recon.clamp_(-1.0, 1.0) if quantize_denoised: x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) model_mean, posterior_variance, posterior_log_variance = self.q_posterior( x_start=x_recon, x_t=x, t=t ) if return_codebook_ids: return model_mean, posterior_variance, posterior_log_variance, logits elif return_x0: return model_mean, posterior_variance, posterior_log_variance, x_recon else: return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample( self, x, c, t, clip_denoised=False, repeat_noise=False, return_codebook_ids=False, quantize_denoised=False, return_x0=False, temperature=1.0, noise_dropout=0.0, score_corrector=None, corrector_kwargs=None, ): b, *_, device = *x.shape, x.device outputs = self.p_mean_variance( x=x, 
c=c, t=t, clip_denoised=clip_denoised, return_codebook_ids=return_codebook_ids, quantize_denoised=quantize_denoised, return_x0=return_x0, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, ) if return_codebook_ids: raise DeprecationWarning("Support dropped.") model_mean, _, model_log_variance, logits = outputs elif return_x0: model_mean, _, model_log_variance, x0 = outputs else: model_mean, _, model_log_variance = outputs noise = noise_like(x.shape, device, repeat_noise) * temperature if noise_dropout > 0.0: noise = torch.nn.functional.dropout(noise, p=noise_dropout) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) if return_codebook_ids: return model_mean + nonzero_mask * ( 0.5 * model_log_variance ).exp() * noise, logits.argmax(dim=1) if return_x0: return ( model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0, ) else: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def progressive_denoising( self, cond, shape, verbose=True, callback=None, quantize_denoised=False, img_callback=None, mask=None, x0=None, temperature=1.0, noise_dropout=0.0, score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, log_every_t=None, ): if not log_every_t: log_every_t = self.log_every_t timesteps = self.num_timesteps if batch_size is not None: b = batch_size if batch_size is not None else shape[0] shape = [batch_size] + list(shape) else: b = batch_size = shape[0] if x_T is None: img = torch.randn(shape, device=self.device) else: img = x_T intermediates = [] if cond is not None: if isinstance(cond, dict): cond = { key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond } else: cond = ( [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] ) if start_T is not None: timesteps = min(timesteps, start_T) iterator = ( tqdm( reversed(range(0, timesteps)), desc="Progressive Generation", total=timesteps, ) if verbose else reversed(range(0, timesteps)) ) if type(temperature) == float: temperature = [temperature] * timesteps for i in iterator: ts = torch.full((b,), i, device=self.device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != "hybrid" tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img, x0_partial = self.p_sample( img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, return_x0=True, temperature=temperature[i], noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs, ) if mask is not None: assert x0 is not None img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1.0 - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) if callback: callback(i) if img_callback: img_callback(img, i) return img, intermediates @torch.no_grad() def p_sample_loop( self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None, ): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator 
= ( tqdm(reversed(range(0, timesteps)), desc="Sampling t", total=timesteps) if verbose else reversed(range(0, timesteps)) ) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != "hybrid" tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample( img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, ) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1.0 - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample( self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None, **kwargs, ): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = { key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond } else: cond = ( [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] ) return self.p_sample_loop( cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0, ) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim: ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates = ddim_sampler.sample( ddim_steps, batch_size, shape, cond, verbose=False, **kwargs ) else: samples, intermediates = self.sample( cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs ) return samples, intermediates @torch.no_grad() def get_unconditional_conditioning( self, batch_size, null_label=None, image_size=512 ): if null_label is not None: xc = null_label if isinstance(xc, ListConfig): xc = list(xc) if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: if hasattr(xc, "to"): xc = xc.to(self.device) c = self.get_learned_conditioning(xc) else: # todo: get null label from cond_stage_model raise NotImplementedError() c = repeat(c, "1 ... 
-> b ...", b=batch_size).to(self.device) cond = {} cond["c_crossattn"] = [c] cond["c_concat"] = [ torch.zeros([batch_size, 4, image_size // 8, image_size // 8]).to( self.device ) ] return cond @torch.no_grad() def log_images( self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1.0, return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, unconditional_guidance_scale=1.0, unconditional_guidance_label=None, use_ema_scope=True, **kwargs, ): ema_scope = self.ema_scope if use_ema_scope else nullcontext use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input( batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N, ) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption", "txt"]: xc = log_txt_as_img( (x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25, ) log["conditioning"] = xc elif self.cond_stage_key == "class_label": xc = log_txt_as_img( (x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25, ) log["conditioning"] = xc elif isimage(xc): log["conditioning"] = xc
if ismap(xc):
16
2023-10-23 07:40:20+00:00
16k
zju3dv/4K4D
easyvolcap/utils/gl_utils.py
[ { "identifier": "dotdict", "path": "easyvolcap/utils/base_utils.py", "snippet": "class dotdict(dict, Dict[KT, VT]):\n \"\"\"\n This is the default data passing object used throughout the codebase\n Main function: dot access for dict values & dict like merging and updates\n\n a dictionary that supports dot notation \n as well as dictionary access notation \n usage: d = make_dotdict() or d = make_dotdict{'val1':'first'})\n set attributes: d.val2 = 'second' or d['val2'] = 'second'\n get attributes: d.val2 or d['val2']\n \"\"\"\n\n def update(self, dct: Dict = None, **kwargs):\n dct = copy(dct) # avoid modifying the original dict, use super's copy to avoid recursion\n\n # Handle different arguments\n if dct is None:\n dct = kwargs\n elif isinstance(dct, Mapping):\n dct.update(kwargs)\n else:\n super().update(dct, **kwargs)\n return\n\n # Recursive updates\n for k, v in dct.items():\n if k in self:\n\n # Handle type conversions\n target_type = type(self[k])\n if not isinstance(v, target_type):\n # NOTE: bool('False') will be True\n if target_type == bool and isinstance(v, str):\n dct[k] = v == 'True'\n else:\n dct[k] = target_type(v)\n\n if isinstance(v, dict):\n self[k].update(v) # recursion from here\n else:\n self[k] = v\n else:\n if isinstance(v, dict):\n self[k] = dotdict(v) # recursion?\n else:\n self[k] = v\n return self\n\n def __init__(self, *args, **kwargs):\n self.update(*args, **kwargs)\n\n copy = return_dotdict(dict.copy)\n fromkeys = return_dotdict(dict.fromkeys)\n\n # def __hash__(self):\n # # return hash(''.join([str(self.values().__hash__())]))\n # return super(dotdict, self).__hash__()\n\n # def __init__(self, *args, **kwargs):\n # super(dotdict, self).__init__(*args, **kwargs)\n\n \"\"\"\n Uncomment following lines and \n comment out __getattr__ = dict.__getitem__ to get feature:\n \n returns empty numpy array for undefined keys, so that you can easily copy things around\n TODO: potential caveat, harder to trace where this is set to np.array([], dtype=np.float32)\n \"\"\"\n\n def __getitem__(self, key):\n try:\n return dict.__getitem__(self, key)\n except KeyError as e:\n raise AttributeError(e)\n # MARK: Might encounter exception in newer version of pytorch\n # Traceback (most recent call last):\n # File \"/home/xuzhen/miniconda3/envs/torch/lib/python3.9/multiprocessing/queues.py\", line 245, in _feed\n # obj = _ForkingPickler.dumps(obj)\n # File \"/home/xuzhen/miniconda3/envs/torch/lib/python3.9/multiprocessing/reduction.py\", line 51, in dumps\n # cls(buf, protocol).dump(obj)\n # KeyError: '__getstate__'\n # MARK: Because you allow your __getattr__() implementation to raise the wrong kind of exception.\n # FIXME: not working typing hinting code\n __getattr__: Callable[..., 'torch.Tensor'] = __getitem__ # type: ignore # overidden dict.__getitem__\n __getattribute__: Callable[..., 'torch.Tensor'] # type: ignore\n # __getattr__ = dict.__getitem__\n __setattr__ = dict.__setitem__\n __delattr__ = dict.__delitem__\n\n # TODO: better ways to programmically define these special variables?\n\n @property\n def meta(self) -> dotdict:\n # Special variable used for storing cpu tensor in batch\n if 'meta' not in self:\n self.meta = dotdict()\n return self.__getitem__('meta')\n\n @meta.setter\n def meta(self, meta):\n self.__setitem__('meta', meta)\n\n @property\n def output(self) -> dotdict: # late annotation needed for this\n # Special entry for storing output tensor in batch\n if 'output' not in self:\n self.output = dotdict()\n return self.__getitem__('output')\n\n @output.setter\n 
def output(self, output):\n self.__setitem__('output', output)\n\n @property\n def persistent(self) -> dotdict: # late annotation needed for this\n # Special entry for storing persistent tensor in batch\n if 'persistent' not in self:\n self.persistent = dotdict()\n return self.__getitem__('persistent')\n\n @persistent.setter\n def persistent(self, persistent):\n self.__setitem__('persistent', persistent)\n\n @property\n def type(self) -> str: # late annotation needed for this\n # Special entry for type based construction system\n return self.__getitem__('type')\n\n @type.setter\n def type(self, type):\n self.__setitem__('type', type)\n\n def to_dict(self):\n out = dict()\n for k, v in self.items():\n if isinstance(v, dotdict):\n v = v.to_dict() # recursion point\n out[k] = v\n return out" }, { "identifier": "Camera", "path": "easyvolcap/utils/viewer_utils.py", "snippet": "class Camera:\n # Helper class to manage camera parameters\n def __init__(self,\n H: int = 512,\n W: int = 512,\n K: torch.Tensor = torch.tensor([[512.0, 0.0, 256], [0.0, 512.0, 256.0], [0.0, 0.0, 1.0]]), # intrinsics\n R: torch.Tensor = torch.tensor([[-1.0, 0.0, 0.0,], [0.0, 0.0, -1.0,], [0.0, -1.0, 0.0,]]), # extrinsics\n T: torch.Tensor = torch.tensor([[0.0], [0.0], [-3.0],]), # extrinsics\n n: float = 0.002, # bounds limit\n f: float = 100, # bounds limit\n t: float = 0.0, # temporal dimension (implemented as a float instead of int)\n v: float = 0.0, # view dimension (implemented as a float instead of int)\n bounds: torch.Tensor = torch.tensor([[-1.0, -1.0, -1.0], [1.0, 1.0, 1.0]]), # bounding box\n\n # camera update hyperparameters\n origin: torch.Tensor = torch.tensor([0.0, 0.0, 0.0]),\n world_up: torch.Tensor = torch.tensor([0.0, 0.0, 1.0]),\n movement_speed: float = 1.0, # gui movement speed\n\n batch: dotdict = None, # will ignore all other inputs\n string: str = None, # will ignore all other inputs\n **kwargs,\n ) -> None:\n\n # Batch (network input parameters)\n if string is None:\n if batch is None:\n batch = dotdict()\n batch.H, batch.W, batch.K, batch.R, batch.T, batch.n, batch.f, batch.t, batch.v, batch.bounds = H, W, K, R, T, n, f, t, v, bounds\n self.from_batch(batch)\n \n # Other configurables\n self.origin = vec3(*origin)\n self.world_up = vec3(*world_up)\n self.movement_speed = movement_speed\n # self.front = self.front # will trigger an update\n else:\n self.from_string(string)\n\n # Internal states to facilitate camera position change\n self.is_dragging = False # rotation\n self.about_origin = False # about origin rotation\n self.is_panning = False # translation\n self.lock_fx_fy = True\n\n @property\n def w2p(self):\n ixt = mat4(self.ixt)\n ixt[3, 3] = 0\n ixt[2, 3] = 1\n return ixt @ self.ext # w2c -> c2p = w2p\n\n @property\n def V(self): return self.c2w\n\n @property\n def ixt(self): return self.K\n\n @property\n def gl_ext(self):\n gl_c2w = self.c2w\n gl_c2w[0] *= 1 # flip x\n gl_c2w[1] *= -1 # flip y\n gl_c2w[2] *= -1 # flip z\n gl_ext = glm.affineInverse(gl_c2w)\n return gl_ext # use original opencv ext since we've taken care of the intrinsics in gl_ixt\n\n @property\n def gl_ixt(self):\n # Construct opengl camera matrix with projection & clipping\n # https://fruty.io/2019/08/29/augmented-reality-with-opencv-and-opengl-the-tricky-projection-matrix/\n # https://gist.github.com/davegreenwood/3a32d779f81f08dce32f3bb423672191\n # fmt: off\n gl_ixt = mat4(\n 2 * self.fx / self.W, 0, 0, 0,\n 2 * self.s / self.W, 2 * self.fy / self.H, 0, 0,\n 1 - 2 * (self.cx / self.W), 2 * (self.cy / self.H) - 1, 
(self.f + self.n) / (self.n - self.f), -1,\n 0, 0, 2 * self.f * self.n / (self.n - self.f), 0,\n )\n # fmt: on\n\n return gl_ixt\n\n @property\n def ext(self): return self.w2c\n\n @property\n def w2c(self):\n w2c = mat4(self.R)\n w2c[3] = vec4(*self.T, 1.0)\n return w2c\n\n @property\n def c2w(self):\n return glm.affineInverse(self.w2c)\n\n @property\n def right(self) -> vec3: return vec3(self.R[0, 0], self.R[1, 0], self.R[2, 0]) # c2w R, 0 -> 3,\n\n @property\n def down(self) -> vec3: return vec3(self.R[0, 1], self.R[1, 1], self.R[2, 1]) # c2w R, 1 -> 3,\n\n @property\n def front(self) -> vec3: return vec3(self.R[0, 2], self.R[1, 2], self.R[2, 2]) # c2w R, 2 -> 3,\n\n @front.setter\n def front(self, v: vec3):\n front = v # the last row of R\n self.R[0, 2], self.R[1, 2], self.R[2, 2] = front.x, front.y, front.z\n right = glm.normalize(glm.cross(self.front, self.world_up)) # right\n self.R[0, 0], self.R[1, 0], self.R[2, 0] = right.x, right.y, right.z\n down = glm.cross(self.front, self.right) # down\n self.R[0, 1], self.R[1, 1], self.R[2, 1] = down.x, down.y, down.z\n\n @property\n def center(self): return -glm.transpose(self.R) @ self.T # 3,\n\n @center.setter\n def center(self, v: vec3):\n self.T = -self.R @ v # 3, 1\n\n @property\n def s(self): return self.K[1, 0]\n\n @s.setter\n def s(self, s): self.K[1, 0] = s\n\n @property\n def fx(self): return self.K[0, 0]\n\n @fx.setter\n def fx(self, v: float):\n v = min(v, 1e5)\n v = max(v, 1e-3)\n if self.lock_fx_fy:\n self.K[1, 1] = v / self.K[0, 0] * self.K[1, 1]\n self.K[0, 0] = v\n\n @property\n def fy(self): return self.K[1, 1]\n\n @fy.setter\n def fy(self, v: float):\n if self.lock_fx_fy:\n self.K[0, 0] = v / self.K[1, 1] * self.K[0, 0]\n self.K[1, 1] = v\n\n @property\n def cx(self): return self.K[2, 0]\n\n @cx.setter\n def cx(self, v: float):\n self.K[2, 0] = v\n\n @property\n def cy(self): return self.K[2, 1]\n\n @cy.setter\n def cy(self, v: float):\n self.K[2, 1] = v\n\n def begin_dragging(self,\n x: float, y: float,\n is_panning: bool,\n about_origin: bool,\n ):\n self.is_dragging = True\n self.is_panning = is_panning\n self.about_origin = about_origin\n self.drag_start = vec2([x, y])\n\n # Record internal states # ? 
Will this make a copy?\n self.drag_start_front = self.front # a recording\n self.drag_start_down = self.down\n self.drag_start_right = self.right\n self.drag_start_center = self.center\n self.drag_start_origin = self.origin\n self.drag_start_world_up = self.world_up\n\n # Need to find the max or min delta y to align with world_up\n dot = glm.dot(self.world_up, self.drag_start_front)\n self.drag_ymin = -np.arccos(-dot) + 0.01 # drag up, look down\n self.drag_ymax = np.pi + self.drag_ymin - 0.02 # remove the 0.01 of drag_ymin\n\n def end_dragging(self):\n self.is_dragging = False\n\n def update_dragging(self, x: float, y: float):\n if not self.is_dragging:\n return\n\n current = vec2(x, y)\n delta = current - self.drag_start\n delta /= max(self.H, self.W)\n delta *= -1\n\n if self.is_panning:\n delta *= self.movement_speed\n center_delta = delta[0] * self.drag_start_right + delta[1] * self.drag_start_down\n self.center = self.drag_start_center + center_delta\n if self.about_origin:\n self.origin = self.drag_start_origin + center_delta\n else:\n m = mat4(1.0)\n m = glm.rotate(m, delta.x % 2 * np.pi, self.world_up)\n m = glm.rotate(m, np.clip(delta.y, self.drag_ymin, self.drag_ymax), self.drag_start_right)\n self.front = m @ self.drag_start_front # might overshoot\n\n if self.about_origin:\n self.center = -m @ (self.origin - self.drag_start_center) + self.origin\n\n def move(self, x_offset: float, y_offset: float):\n speed_factor = 1e-1\n movement = y_offset * speed_factor\n movement = movement * self.front * self.movement_speed\n self.center += movement\n\n if self.is_dragging:\n self.drag_start_center += movement\n\n def to_batch(self):\n meta = dotdict()\n meta.H = torch.as_tensor(self.H)\n meta.W = torch.as_tensor(self.W)\n meta.K = torch.as_tensor(self.K.to_list()).mT\n meta.R = torch.as_tensor(self.R.to_list()).mT\n meta.T = torch.as_tensor(self.T.to_list())[..., None]\n meta.n = torch.as_tensor(self.n)\n meta.f = torch.as_tensor(self.f)\n meta.t = torch.as_tensor(self.t)\n meta.v = torch.as_tensor(self.v)\n meta.bounds = torch.as_tensor(self.bounds.to_list()) # no transpose for bounds\n\n # GUI related elements\n meta.movement_speed = torch.as_tensor(self.movement_speed)\n meta.origin = torch.as_tensor(self.origin.to_list())\n meta.world_up = torch.as_tensor(self.world_up.to_list())\n\n batch = dotdict()\n batch.update(meta)\n batch.meta.update(meta)\n return batch\n\n def to_easymocap(self):\n batch = self.to_batch()\n camera = to_numpy(batch)\n return camera\n\n def from_easymocap(self, camera: dict):\n batch = to_tensor(camera)\n self.from_batch(batch)\n return self\n\n def to_string(self) -> str:\n batch = to_list(self.to_batch().meta)\n return json.dumps(batch)\n\n def from_string(self, string: str):\n batch = to_tensor(dotdict(json.loads(string)), ignore_list=True)\n self.from_batch(batch)\n\n def from_batch(self, batch: dotdict):\n H, W, K, R, T, n, f, t, v, bounds = batch.H, batch.W, batch.K, batch.R, batch.T, batch.n, batch.f, batch.t, batch.v, batch.bounds\n\n # Batch (network input parameters)\n self.H = int(H)\n self.W = int(W)\n self.K = mat3(*K.mT.ravel())\n self.R = mat3(*R.mT.ravel())\n self.T = vec3(*T.ravel()) # 3,\n self.n = float(n)\n self.f = float(f)\n self.t = float(t)\n self.v = float(v)\n self.bounds = mat2x3(*bounds.ravel()) # 2, 3\n\n if 'movement_speed' in batch: self.movement_speed = float(batch.movement_speed)\n if 'origin' in batch: self.origin = vec3(*batch.origin.ravel()) # 3,\n if 'world_up' in batch: self.world_up = vec3(*batch.world_up.ravel()) # 3,\n 
return self\n\n def custom_pose(self, R: torch.Tensor, T: torch.Tensor, K: torch.Tensor):\n # self.K = mat3(*K.mT.ravel())\n self.R = mat3(*R.mT.ravel())\n self.T = vec3(*T.ravel())" }, { "identifier": "cm_cpu_store", "path": "easyvolcap/utils/color_utils.py", "snippet": "def colormap(v: torch.Tensor, cm: str = 'virdis'):\ndef colormap_linear(v: torch.Tensor, cm: NoneType = None):\ndef colormap_dict(v: torch.Tensor, cm: torch.Tensor):\ndef colormap_list(v: torch.Tensor, cm: torch.Tensor):\ndef yuv_to_rgb(x):\ndef rgb_to_yuv(x):\ndef image_derivative(img: torch.Tensor, mode='sobel', normalized=True) -> torch.Tensor:\ndef image_pyramid(input: torch.Tensor, max_level: int = 4) -> List[torch.Tensor]:\ndef variance_of_laplacian(img: torch.Tensor):" }, { "identifier": "depth_curve_fn", "path": "easyvolcap/utils/depth_utils.py", "snippet": "def depth_curve_fn(depth: torch.Tensor, p: float = 0.01, cm: str = 'linear'):\n depth = normalize_depth(depth)\n depth = colormap(depth, cm)\n return depth" }, { "identifier": "load_pts", "path": "easyvolcap/utils/data_utils.py", "snippet": "def load_pts(filename: str):\n from pyntcloud import PyntCloud\n cloud = PyntCloud.from_file(filename)\n verts = cloud.xyz\n if 'red' in cloud.points and 'green' in cloud.points and 'blue' in cloud.points:\n r = np.asarray(cloud.points['red'])\n g = np.asarray(cloud.points['green'])\n b = np.asarray(cloud.points['blue'])\n colors = np.stack([r, g, b], axis=-1) / 255\n elif 'r' in cloud.points and 'g' in cloud.points and 'b' in cloud.points:\n r = np.asarray(cloud.points['r'])\n g = np.asarray(cloud.points['g'])\n b = np.asarray(cloud.points['b'])\n colors = np.stack([r, g, b], axis=-1) / 255\n else:\n colors = None\n\n if 'nx' in cloud.points and 'ny' in cloud.points and 'nz' in cloud.points:\n nx = np.asarray(cloud.points['nx'])\n ny = np.asarray(cloud.points['ny'])\n nz = np.asarray(cloud.points['nz'])\n norms = np.stack([nx, ny, nz], axis=-1)\n else:\n norms = None\n\n if 'alpha' in cloud.points:\n cloud.points['alpha'] = cloud.points['alpha'] / 255\n\n reserved = ['x', 'y', 'z', 'red', 'green', 'blue', 'r', 'g', 'b', 'nx', 'ny', 'nz']\n scalars = dotdict({k: np.asarray(cloud.points[k])[..., None] for k in cloud.points if k not in reserved}) # one extra dimension at the back added\n return verts, colors, norms, scalars" }, { "identifier": "load_mesh", "path": "easyvolcap/utils/data_utils.py", "snippet": "def load_mesh(filename: str, device='cuda', load_uv=False, load_aux=False, backend='pytorch3d'):\n from pytorch3d.io import load_ply, load_obj\n if backend == 'trimesh':\n import trimesh\n mesh: trimesh.Trimesh = trimesh.load(filename)\n return mesh.vertices, mesh.faces\n\n vm, fm = None, None\n if filename.endswith('.npz'):\n mesh = np.load(filename)\n v = torch.from_numpy(mesh['verts'])\n f = torch.from_numpy(mesh['faces'])\n\n if load_uv:\n vm = torch.from_numpy(mesh['uvs'])\n fm = torch.from_numpy(mesh['uvfaces'])\n else:\n if filename.endswith('.ply'):\n v, f = load_ply(filename)\n elif filename.endswith('.obj'):\n v, faces_attr, aux = load_obj(filename)\n f = faces_attr.verts_idx\n\n if load_uv:\n vm = aux.verts_uvs\n fm = faces_attr.textures_idx\n else:\n raise NotImplementedError(f'Unrecognized input format for: {filename}')\n\n v = v.to(device, non_blocking=True).contiguous()\n f = f.to(device, non_blocking=True).contiguous()\n\n if load_uv:\n vm = vm.to(device, non_blocking=True).contiguous()\n fm = fm.to(device, non_blocking=True).contiguous()\n\n if load_uv:\n if load_aux:\n return v, f, vm, fm, aux\n 
else:\n return v, f, vm, fm\n else:\n return v, f" }, { "identifier": "to_cuda", "path": "easyvolcap/utils/data_utils.py", "snippet": "def to_cuda(batch, device=\"cuda\", ignore_list: bool = False) -> torch.Tensor:\n if isinstance(batch, (tuple, list)):\n batch = [to_cuda(b, device, ignore_list) for b in batch]\n elif isinstance(batch, dict):\n batch = dotdict({k: (to_cuda(v, device, ignore_list) if k != \"meta\" else v) for k, v in batch.items()})\n elif isinstance(batch, torch.Tensor):\n batch = batch.to(device, non_blocking=True)\n else: # numpy and others\n batch = torch.as_tensor(batch, device=device)\n return batch" }, { "identifier": "prepare_feedback_transform", "path": "easyvolcap/utils/fcds_utils.py", "snippet": "def prepare_feedback_transform(H: int, W: int, K: torch.Tensor, R: torch.Tensor, T: torch.Tensor,\n n: torch.Tensor,\n f: torch.Tensor,\n xyz: torch.Tensor,\n rgb: torch.Tensor,\n rad: torch.Tensor):\n ixt = get_ndc_perspective_matrix(K, H, W, n[..., 0], f[..., 0]).to(xyz.dtype) # to opengl, remove last dim of n and f\n w2c = affine_padding(torch.cat([R, T], dim=-1)).to(xyz.dtype)\n c2w = affine_inverse(w2c)\n c2w[..., 0] *= 1 # flip x\n c2w[..., 1] *= -1 # flip y\n c2w[..., 2] *= -1 # flip z\n ext = affine_inverse(c2w)\n pix_xyz = torch.cat([xyz, torch.ones_like(xyz[..., :1])], dim=-1) @ ext.mT @ ixt.mT\n pix_rad = abs(H * ixt[..., 1, 1][..., None, None] * rad / pix_xyz[..., -1:]) # z: B, 1 * B, N, world space radius -> ndc radius B, N, 1\n\n # Prepare data to be rendered\n data = torch.cat([pix_xyz, rgb, pix_rad], dim=-1).ravel() # organize the data inside vbo\n return data" }, { "identifier": "get_opencv_camera_params", "path": "easyvolcap/utils/fcds_utils.py", "snippet": "def get_opencv_camera_params(batch: dotdict):\n H = batch.meta.H[0].item() # !: BATCH\n W = batch.meta.W[0].item() # !: BATCH\n K = batch.K\n R = batch.R\n T = batch.T\n C = -batch.R.mT @ batch.T # B, 3, 1\n return H, W, K, R, T, C" }, { "identifier": "typed", "path": "easyvolcap/utils/net_utils.py", "snippet": "def typed(input_to: torch.dtype = torch.float, output_to: torch.dtype = torch.float):\n from easyvolcap.utils.data_utils import to_x\n\n def wrapper(func: Callable):\n def inner(*args, **kwargs):\n args = to_x(args, input_to)\n kwargs = to_x(kwargs, input_to)\n ret = func(*args, **kwargs)\n ret = to_x(ret, output_to)\n return ret\n return inner\n return wrapper" }, { "identifier": "multi_gather", "path": "easyvolcap/utils/net_utils.py", "snippet": "def multi_gather(values: torch.Tensor, indices: torch.Tensor, dim=-2):\n # Gather the value at the -2th dim of values, augment index shape on the back\n # Example: values: B, P, 3, index: B, N, -> B, N, 3\n\n # index will first be augmented to match the values' dimentionality at the back\n # take care of batch dimension of, and acts like a linear indexing in the target dimention\n # we assume that the values's second to last dimension is the dimension to be indexed on\n return values.gather(dim, multi_indexing(indices, values.shape, dim))" }, { "identifier": "create_meshgrid", "path": "easyvolcap/utils/net_utils.py", "snippet": "@torch.jit.script\ndef create_meshgrid(H: int, W: int, device: torch.device = torch.device('cuda'), indexing: str = 'ij', ndc: bool = False,\n correct_pix: bool = True, dtype: torch.dtype = torch.float):\n # kornia has meshgrid, but not the best\n i = torch.arange(H, device=device, dtype=dtype)\n j = torch.arange(W, device=device, dtype=dtype)\n if correct_pix:\n i = i + 0.5\n j = j + 0.5\n if ndc:\n i = i / H * 2 - 1\n j 
= j / W * 2 - 1\n ij = torch.meshgrid(i, j, indexing=indexing) # defaults to ij\n ij = torch.stack(ij, dim=-1) # Ht, Wt, 2\n\n return ij" }, { "identifier": "volume_rendering", "path": "easyvolcap/utils/net_utils.py", "snippet": "def volume_rendering(rgb: torch.Tensor, occ: torch.Tensor, bg_brightness: float = 0.0):\n # NOTE: here occ's last dim is not 1, but n_samples\n # rgb: n_batch, n_rays, n_samples, 3\n # occ: n_batch, n_rays, n_samples, 1\n # bg_image: n_batch, n_rays, 3 or None, if this is given as not None, the last sample on the ray will be replaced by this value (assuming this lies on the background)\n # We need to assume:\n # 1. network will find the True geometry, thus giving the background image its real value\n # 2. background image is rendered in a non-static fasion\n # returns:\n # weights: n_batch, n_rays, n_samples\n # rgb_map: n_batch, n_rays, 3\n # acc_map: n_batch, n_rays, 1\n\n weights = render_weights(occ) # (n_batch, n_rays, n_samples)\n rgb_map, acc_map = render_rgb_acc(weights, rgb)\n rgb_map = rgb_map + (1. - acc_map) * bg_brightness\n\n return weights, rgb_map, acc_map" }, { "identifier": "raw2alpha", "path": "easyvolcap/utils/net_utils.py", "snippet": "def raw2alpha(raws: torch.Tensor, dists=0.005, bias=0.0):\n if isinstance(dists, torch.Tensor):\n if dists.ndim == raws.ndim - 1:\n dists = dists[..., None]\n return 1. - torch.exp(-(raws + bias) * dists)" }, { "identifier": "torch_dtype_to_numpy_dtype", "path": "easyvolcap/utils/net_utils.py", "snippet": "def torch_dtype_to_numpy_dtype(torch_dtype):\n mapping = {\n torch.float32: np.float32,\n torch.float64: np.float64,\n torch.int32: np.int32,\n torch.int64: np.int64,\n torch.int16: np.int16,\n torch.uint8: np.uint8,\n torch.int8: np.int8,\n torch.bool: np.bool_\n }\n return mapping.get(torch_dtype, None)" }, { "identifier": "load_pretrained", "path": "easyvolcap/utils/net_utils.py", "snippet": "def load_pretrained(model_dir: str, resume: bool = True, epoch: int = -1, ext: str = '.npz', remove_if_not_resuming: bool = False, warn_if_not_exist: bool = False):\n if not resume: # remove nothing here\n if remove_if_not_resuming:\n if os.path.isdir(model_dir) and len(os.listdir(model_dir)): # only inform the use if there are files\n # log(red(f\"Removing trained weights: {blue(model_dir)}\"))\n try: run(f'rm -r {model_dir}')\n except: pass\n return None, None\n\n if not os.path.exists(model_dir):\n if warn_if_not_exist:\n log(red(f'Pretrained network: {blue(model_dir)} does not exist'))\n return None, None\n if os.path.isdir(model_dir):\n pts = [\n int(pt.split('.')[0]) for pt in os.listdir(model_dir) if pt != f'latest{ext}' and pt.endswith(ext) and pt.split('.')[0].isnumeric()\n ]\n if len(pts) == 0 and f'latest{ext}' not in os.listdir(model_dir):\n return None, None\n if epoch == -1:\n if f'latest{ext}' in os.listdir(model_dir):\n pt = 'latest'\n else:\n pt = max(pts)\n else:\n pt = epoch\n model_path = join(model_dir, f'{pt}{ext}')\n else:\n model_path = model_dir\n\n if ext == '.pt' or ext == '.pth':\n pretrained = dotdict(torch.load(model_path, 'cpu'))\n else:\n from easyvolcap.utils.data_utils import to_tensor\n pretrained = dotdict(model=to_tensor(dict(**np.load(model_path))), epoch=-1) # the npz files do not contain training parameters\n\n return pretrained, model_path" }, { "identifier": "get_bounds", "path": "easyvolcap/utils/net_utils.py", "snippet": "def get_bounds(xyz, padding=0.05): # 5mm padding? 
really?\n # xyz: n_batch, n_points, 3\n\n min_xyz = torch.min(xyz, dim=1)[0] # torch min with dim is ...\n max_xyz = torch.max(xyz, dim=1)[0]\n min_xyz -= padding\n max_xyz += padding\n bounds = torch.stack([min_xyz, max_xyz], dim=1)\n return bounds\n diagonal = bounds[..., 1:] - bounds[..., :1] # n_batch, 1, 3\n bounds[..., 1:] = bounds[..., :1] + torch.ceil(diagonal / voxel_size) * voxel_size # n_batch, 1, 3\n return bounds" }, { "identifier": "CHECK_CUDART_ERROR", "path": "easyvolcap/utils/net_utils.py", "snippet": "def CHECK_CUDART_ERROR(args):\n from cuda import cudart\n\n if isinstance(args, tuple):\n assert len(args) >= 1\n err = args[0]\n if len(args) == 1:\n ret = None\n elif len(args) == 2:\n ret = args[1]\n else:\n ret = args[1:]\n else:\n err = args\n ret = None\n\n assert isinstance(err, cudart.cudaError_t), type(err)\n if err != cudart.cudaError_t.cudaSuccess:\n raise RuntimeError(FORMAT_CUDART_ERROR(err))\n\n return ret" }, { "identifier": "FORMAT_CUDART_ERROR", "path": "easyvolcap/utils/net_utils.py", "snippet": "def FORMAT_CUDART_ERROR(err):\n from cuda import cudart\n return (\n f\"{cudart.cudaGetErrorName(err)[1].decode('utf-8')}({int(err)}): \"\n f\"{cudart.cudaGetErrorString(err)[1].decode('utf-8')}\"\n )" } ]
from typing import TYPE_CHECKING from easyvolcap.utils.egl_utils import create_opengl_context, eglContextManager # must be imported before OpenGL.GL from torch import nn from enum import Enum, auto from os.path import join, dirname from typing import Dict, Union, List from glm import vec2, vec3, vec4, mat3, mat4, mat4x3, mat2x3 # This is actually highly optimized from easyvolcap.utils.console_utils import * from easyvolcap.utils.base_utils import dotdict from easyvolcap.utils.viewer_utils import Camera from easyvolcap.utils.color_utils import cm_cpu_store from easyvolcap.utils.depth_utils import depth_curve_fn from easyvolcap.utils.data_utils import load_pts, load_mesh, to_cuda from easyvolcap.utils.fcds_utils import prepare_feedback_transform, get_opencv_camera_params from easyvolcap.utils.net_utils import typed, multi_gather, create_meshgrid, volume_rendering, raw2alpha, torch_dtype_to_numpy_dtype, load_pretrained, get_bounds from easyvolcap.utils.net_utils import CHECK_CUDART_ERROR, FORMAT_CUDART_ERROR from easyvolcap.utils.egl_utils import create_opengl_context, eglContextManager from OpenGL.GL import shaders from pytorch3d.structures import Pointclouds, Meshes from pytorch3d.structures import Pointclouds, Meshes from cuda import cudart from cuda import cudart from cuda import cudart from easyvolcap.engine.registry import call_from_cfg from easyvolcap.utils.gaussian_utils import GaussianModel from cuda import cudart from cuda import cudart from cuda import cudart from cuda import cudart from cuda import cudart from cuda import cudart import os import glm import torch import ctypes import numpy as np import sys import OpenGL.GL as gl
12167
# Restore the original state gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0) return color_buffer, depth_upper, depth_lower, depth_attach, fbo def hareward_peeling_framebuffer(H: int, W: int): # Prepare for write frame buffers index_buffer = gl.glGenTextures(1) depth_lower = gl.glGenTextures(1) depth_attach = gl.glGenTextures(1) fbo = gl.glGenFramebuffers(1) # generate 1 framebuffer, storereference in fb # Init the texture (call the resizing function), will simply allocate empty memory # The internal format describes how the texture shall be stored in the GPU. The format describes how the format of your pixel data in client memory (together with the type parameter). gl.glBindTexture(gl.GL_TEXTURE_2D, index_buffer) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_R32I, W, H, 0, gl.GL_RED_INTEGER, gl.GL_INT, ctypes.c_void_p(0)) # 32 gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST) gl.glBindTexture(gl.GL_TEXTURE_2D, depth_lower) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_R32F, W, H, 0, gl.GL_RED, gl.GL_FLOAT, ctypes.c_void_p(0)) # 32 gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST) gl.glBindTexture(gl.GL_TEXTURE_2D, depth_attach) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_DEPTH_COMPONENT24, W, H, 0, gl.GL_DEPTH_COMPONENT, gl.GL_FLOAT, ctypes.c_void_p(0)) # 32 gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST) # Bind texture to fbo gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, fbo) gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_TEXTURE_2D, index_buffer, 0) # location 1 gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT1, gl.GL_TEXTURE_2D, depth_lower, 0) # location 1 gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_DEPTH_ATTACHMENT, gl.GL_TEXTURE_2D, depth_attach, 0) gl.glDrawBuffers(2, [gl.GL_COLOR_ATTACHMENT0, gl.GL_COLOR_ATTACHMENT1]) # Check framebuffer status if gl.glCheckFramebufferStatus(gl.GL_FRAMEBUFFER) != gl.GL_FRAMEBUFFER_COMPLETE: log(red('Framebuffer not complete, exiting...')) raise RuntimeError('Incomplete framebuffer') # Restore the original state gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0) return index_buffer, depth_lower, depth_attach, fbo class Gaussian(Mesh): def __init__(self, filename: str = 'assets/meshes/zju3dv.npz', gaussian_cfg: dotdict = dotdict(), quad_cfg: dotdict = dotdict(), render_depth: bool = False, # show depth or show color dpt_cm: str = 'linear', H: int = 1024, W: int = 1024, **kwargs, ): # Import Gaussian Model # Housekeeping super().__init__(**kwargs) self.name = split(filename)[-1] # Init Gaussian related models, for now only the first gaussian model is supported if filename.endswith('.npz') or filename.endswith('.pt') or filename.endswith('.pth'): # Load from GaussianTSampler pretrained, _ = load_pretrained(filename) # loaded model and updated path (maybe) pretrained = pretrained.model state_dict = dotdict() for k, v in pretrained.items(): if k.startswith('sampler.pcds.0'): state_dict[k.replace('sampler.pcds.0.', '')] = v # Load the parameters into the gaussian model self.gaussian_model: GaussianModel = call_from_cfg(GaussianModel, gaussian_cfg) # init empty gaussian model self.gaussian_model.load_state_dict(state_dict) # load the first gaussian model self.gaussian_model.cuda() # move the parameters to GPU elif 
filename.endswith('.ply'): # Load raw GaussianModel pass else: raise NotImplementedError # Init rendering quad self.quad: Quad = call_from_cfg(Quad, quad_cfg, H=H, W=W) # Other configurations self.render_depth = render_depth self.dpt_cm = dpt_cm # Disabling initialization def load_from_file(self, *args, **kwargs): pass def load_from_data(self, *args, **kwargs): pass def compile_shaders(self): pass def update_gl_buffers(self): pass def resize_textures(self, H: int, W: int): self.quad.resize_textures(H, W) # The actual rendering function @torch.no_grad() def render(self, camera: Camera): # Perform actual gaussian rendering batch = to_cuda(camera.to_batch()) rgb, acc, dpt = self.gaussian_model.render(batch) if self.render_depth:
from __future__ import annotations if TYPE_CHECKING: # fmt: off # Environment variable messaging # Need to export EGL_DEVICE_ID before trying to import egl # And we need to consider the case when we're performing distributed training # from easyvolcap.engine import cfg, args # FIXME: GLOBAL IMPORTS if 'easyvolcap.engine' in sys.modules and (sys.modules['easyvolcap.engine'].args.type != 'gui' or sys.modules['easyvolcap.engine'].cfg.viewer_cfg.type == 'UnitySocketViewer'): # FIXME: GLOBAL VARIABLES try: except Exception as e: log(yellow(f'Could not import EGL related modules. {type(e).__name__}: {e}')) os.environ['PYOPENGL_PLATFORM'] = '' try: except Exception as e: print(f'WARNING: OpenGL shaders import error encountered, please install the latest PyOpenGL from github using:') print(f'pip install git+https://github.com/mcfletch/pyopengl') raise e # fmt: on def linearize_depth(d, n: float, f: float): # 0-1 -> -1,1 # ndc -> view return (2.0 * n * f) / (f + n - (d * 2 - 1) * (f - n)) def common_opengl_options(): # Use program point size gl.glEnable(gl.GL_PROGRAM_POINT_SIZE) # Performs face culling gl.glEnable(gl.GL_CULL_FACE) gl.glCullFace(gl.GL_BACK) # Performs alpha trans testing gl.glEnable(gl.GL_ALPHA_TEST) # Performs z-buffer testing gl.glEnable(gl.GL_DEPTH_TEST) # gl.glDepthMask(gl.GL_TRUE) gl.glDepthFunc(gl.GL_LEQUAL) # gl.glDepthRange(-1.0, 1.0) gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT) # Enable some masking tests gl.glEnable(gl.GL_SCISSOR_TEST) # Enable this to correctly render points # https://community.khronos.org/t/gl-point-sprite-gone-in-3-2/59310 gl.glEnable(gl.GL_POINT_SPRITE) # MARK: ONLY SPRITE IS WORKING FOR NOW # gl.glEnable(gl.GL_POINT_SMOOTH) # MARK: ONLY SPRITE IS WORKING FOR NOW # # Configure how we store the pixels in memory for our subsequent reading of the FBO to store the rendering into memory. # # The second argument specifies that our pixels will be in bytes. 
# gl.glPixelStorei(gl.GL_PACK_ALIGNMENT, 1) def load_shader_source(file: str = 'splat.frag'): # Ideally we can just specify the shader name instead of an variable if not exists(file): file = f'{dirname(__file__)}/shaders/{file}' if not exists(file): file = file.replace('shaders/', '') if not exists(file): raise RuntimeError(f'Shader file: {file} does not exist') with open(file, 'r') as f: return f.read() def use_gl_program(program: Union[shaders.ShaderProgram, dict]): if isinstance(program, dict): # Recompile the program if the user supplied sources program = dotdict(program) program = shaders.compileProgram( shaders.compileShader(program.VERT_SHADER_SRC, gl.GL_VERTEX_SHADER), shaders.compileShader(program.FRAG_SHADER_SRC, gl.GL_FRAGMENT_SHADER) ) return gl.glUseProgram(program) class Mesh: class RenderType(Enum): POINTS = 1 LINES = 2 TRIS = 3 QUADS = 4 # TODO: Support quad loading STRIPS = 5 # Helper class to render a mesh on opengl # This implementation should only be used for debug visualization # Since no differentiable mechanism will be added # We recommend using nvdiffrast and pytorch3d's point renderer directly if you will to optimize these structures directly def __init__(self, verts: torch.Tensor = torch.tensor([[0, 0, 0], [0, 1, 0], [0, 0, 1]]), # need to call update after update faces: torch.Tensor = torch.tensor([[0, 1, 2]]), # need to call update after update colors: torch.Tensor = None, normals: torch.Tensor = None, scalars: dotdict[str, torch.Tensor] = dotdict(), render_type: RenderType = RenderType.TRIS, # Misc info name: str = 'mesh', filename: str = '', visible: bool = True, # Render options shade_flat: bool = False, # smooth shading point_radius: float = 0.015, render_normal: bool = False, # Storage options store_device: str = 'cpu', compute_device: str = 'cuda', vert_sizes=[3, 3, 3], # pos + color + norm # Init options est_normal_thresh: int = 100000, # Ignore unused input **kwargs, ) -> None: super().__init__() self.name = name self.visible = visible self.render_type = render_type self.shade_flat = shade_flat self.point_radius = point_radius self.render_normal = render_normal self.store_device = store_device self.compute_device = compute_device self.vert_sizes = vert_sizes self.est_normal_thresh = est_normal_thresh # Uniform and program self.compile_shaders() self.uniforms = dotdict() # uniform values # Before initialization self.max_verts = 0 self.max_faces = 0 # OpenGL data if filename: self.load_from_file(filename) else: self.load_from_data(verts, faces, colors, normals, scalars) def compile_shaders(self): try: self.mesh_program = shaders.compileProgram( shaders.compileShader(load_shader_source('mesh.vert'), gl.GL_VERTEX_SHADER), shaders.compileShader(load_shader_source('mesh.frag'), gl.GL_FRAGMENT_SHADER) ) self.point_program = shaders.compileProgram( shaders.compileShader(load_shader_source('point.vert'), gl.GL_VERTEX_SHADER), shaders.compileShader(load_shader_source('point.frag'), gl.GL_FRAGMENT_SHADER) ) except Exception as e: print(str(e).encode('utf-8').decode('unicode_escape')) raise e @property def n_verts_bytes(self): return len(self.verts) * self.vert_size * self.verts.element_size() @property def n_faces_bytes(self): return len(self.faces) * self.face_size * self.faces.element_size() @property def verts_data(self): # a heavy copy operation verts = torch.cat([self.verts, self.colors, self.normals], dim=-1).ravel().numpy() # MARK: Maybe sync verts = np.asarray(verts, dtype=np.float32, order='C') return verts @property def faces_data(self): # a heavy copy 
operation faces = self.faces.ravel().numpy() # N, 3 faces = np.asarray(faces, dtype=np.uint32, order='C') return faces @property def face_size(self): return self.render_type.value @property def vert_size(self): return sum(self.vert_sizes) def load_from_file(self, filename: str = 'assets/meshes/bunny.ply'): verts, faces, colors, normals, scalars = self.load_data_from_file(filename) self.load_from_data(verts, faces, colors, normals, scalars) def load_data_from_file(self, filename: str = 'assets/meshes/bunny.ply'): self.name = os.path.split(filename)[-1] verts, faces, colors, normals, scalars = None, None, None, None, None verts, faces = load_mesh(filename, device=self.store_device) if not len(faces): verts, colors, normals, scalars = load_pts(filename) self.render_type = Mesh.RenderType.POINTS else: self.render_type = Mesh.RenderType(faces.shape[-1]) # use value return verts, faces, colors, normals, scalars def load_from_data(self, verts: torch.Tensor, faces: torch.Tensor, colors: torch.Tensor = None, normals: torch.Tensor = None, scalars: dotdict[str, torch.Tensor] = dotdict()): # Data type conversion verts = torch.as_tensor(verts) # convert to tensor if input is of other types if verts.dtype == torch.float32: pass # supports this for now elif verts.dtype == torch.float16: pass # supports this for now else: verts = verts.type(torch.float) # convert to float32 if input is of higher precision gl_dtype = gl.GL_FLOAT if verts.dtype == torch.float else gl.GL_HALF_FLOAT self.vert_gl_types = [gl_dtype] * len(self.vert_sizes) # Prepare main mesh data: vertices and faces self.verts = torch.as_tensor(verts, device=self.store_device) self.faces = torch.as_tensor(faces, device=self.store_device, dtype=torch.int32) # NOTE: No uint32 support # Prepare colors and normals if colors is not None: self.colors = torch.as_tensor(colors, device=self.store_device, dtype=self.verts.dtype) else: bounds = get_bounds(self.verts[None])[0] self.colors = (self.verts - bounds[0]) / (bounds[1] - bounds[0]) if normals is not None: self.normals = torch.as_tensor(normals, device=self.store_device, dtype=self.verts.dtype) else: self.estimate_vertex_normals() # Prepare other scalars if scalars is not None: for k, v in scalars.items(): setattr(self, k, torch.as_tensor(v, device=self.store_device, dtype=self.verts.dtype)) # is this ok? 
# Prepare OpenGL related buffer self.update_gl_buffers() def estimate_vertex_normals(self): def est_pcd_norms(): if self.verts.dtype == torch.half: self.normals = self.verts else: pcd = Pointclouds([self.verts]).to(self.compute_device) self.normals = pcd.estimate_normals()[0].cpu().to(self.verts.dtype) # no batch dim def est_tri_norms(): if self.verts.dtype == torch.half: self.normals = self.verts else: mesh = Meshes([self.verts], [self.faces]).to(self.compute_device) self.normals = mesh.verts_normals_packed().cpu().to(self.verts.dtype) # no batch dim if not len(self.verts) > self.est_normal_thresh: if self.render_type == Mesh.RenderType.TRIS: est_tri_norms() elif self.render_type == Mesh.RenderType.POINTS: est_pcd_norms() else: # log(yellow(f'Unsupported mesh type: {self.render_type} for normal estimation, skipping')) self.normals = self.verts else: # log(yellow(f'Number of points for mesh too large: {len(self.verts)} > {self.est_normal_thresh}, skipping normal estimation')) self.normals = self.verts def offscreen_render(self, eglctx: "eglContextManager", camera: Camera): eglctx.resize(camera.W, camera.H) self.render(camera) def render(self, camera: Camera): if not self.visible: return # For point rendering if self.render_type == Mesh.RenderType.POINTS: gl.glUseProgram(self.point_program) self.use_gl_program(self.point_program) else: gl.glUseProgram(self.mesh_program) self.use_gl_program(self.mesh_program) self.upload_gl_uniforms(camera) gl.glBindVertexArray(self.vao) if self.render_type == Mesh.RenderType.POINTS: gl.glDrawArrays(gl.GL_POINTS, 0, len(self.verts)) # number of vertices elif self.render_type == Mesh.RenderType.LINES: gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo) gl.glDrawElements(gl.GL_LINES, len(self.faces) * self.face_size, gl.GL_UNSIGNED_INT, ctypes.c_void_p(0)) # number of indices elif self.render_type == Mesh.RenderType.TRIS: gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo) gl.glDrawElements(gl.GL_TRIANGLES, len(self.faces) * self.face_size, gl.GL_UNSIGNED_INT, ctypes.c_void_p(0)) # number of indices elif self.render_type == Mesh.RenderType.QUADS: gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo) gl.glDrawElements(gl.GL_QUADS, len(self.faces) * self.face_size, gl.GL_UNSIGNED_INT, ctypes.c_void_p(0)) # number of indices elif self.render_type == Mesh.RenderType.STRIPS: gl.glDrawArrays(gl.GL_TRIANGLE_STRIP, 0, len(self.verts)) else: raise NotImplementedError gl.glBindVertexArray(0) def use_gl_program(self, program: shaders.ShaderProgram): use_gl_program(program) self.uniforms.shade_flat = gl.glGetUniformLocation(program, "shade_flat") self.uniforms.point_radius = gl.glGetUniformLocation(program, "point_radius") self.uniforms.render_normal = gl.glGetUniformLocation(program, "render_normal") self.uniforms.H = gl.glGetUniformLocation(program, "H") self.uniforms.W = gl.glGetUniformLocation(program, "W") self.uniforms.n = gl.glGetUniformLocation(program, "n") self.uniforms.f = gl.glGetUniformLocation(program, "f") self.uniforms.P = gl.glGetUniformLocation(program, "P") self.uniforms.K = gl.glGetUniformLocation(program, "K") self.uniforms.V = gl.glGetUniformLocation(program, "V") self.uniforms.M = gl.glGetUniformLocation(program, "M") def upload_gl_uniforms(self, camera: Camera): K = camera.gl_ixt # hold the reference V = camera.gl_ext # hold the reference M = glm.identity(mat4) P = K * V * M gl.glUniform1i(self.uniforms.shade_flat, self.shade_flat) gl.glUniform1f(self.uniforms.point_radius, self.point_radius) gl.glUniform1i(self.uniforms.render_normal, 
self.render_normal) gl.glUniform1i(self.uniforms.H, camera.H) # o2w gl.glUniform1i(self.uniforms.W, camera.W) # o2w gl.glUniform1f(self.uniforms.n, camera.n) # o2w gl.glUniform1f(self.uniforms.f, camera.f) # o2w gl.glUniformMatrix4fv(self.uniforms.P, 1, gl.GL_FALSE, glm.value_ptr(P)) # o2clip gl.glUniformMatrix4fv(self.uniforms.K, 1, gl.GL_FALSE, glm.value_ptr(K)) # c2clip gl.glUniformMatrix4fv(self.uniforms.V, 1, gl.GL_FALSE, glm.value_ptr(V)) # w2c gl.glUniformMatrix4fv(self.uniforms.M, 1, gl.GL_FALSE, glm.value_ptr(M)) # o2w def update_gl_buffers(self): # Might be overwritten self.resize_buffers(len(self.verts) if hasattr(self, 'verts') else 0, len(self.faces) if hasattr(self, 'faces') else 0) # maybe repeated if hasattr(self, 'verts'): gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vbo) gl.glBufferSubData(gl.GL_ARRAY_BUFFER, 0, self.n_verts_bytes, self.verts_data) # hold the reference if hasattr(self, 'faces'): gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo) gl.glBufferSubData(gl.GL_ELEMENT_ARRAY_BUFFER, 0, self.n_faces_bytes, self.faces_data) def resize_buffers(self, v: int = 0, f: int = 0): if v > self.max_verts or f > self.max_faces: if v > self.max_verts: self.max_verts = v if f > self.max_faces: self.max_faces = f self.init_gl_buffers(v, f) def init_gl_buffers(self, v: int = 0, f: int = 0): # This will only init the corresponding buffer object n_verts_bytes = v * self.vert_size * self.verts.element_size() if v > 0 else self.n_verts_bytes n_faces_bytes = f * self.face_size * self.faces.element_size() if f > 0 else self.n_faces_bytes # Housekeeping if hasattr(self, 'vao'): gl.glDeleteVertexArrays(1, [self.vao]) gl.glDeleteBuffers(2, [self.vbo, self.ebo]) self.vao = gl.glGenVertexArrays(1) self.vbo = gl.glGenBuffers(1) self.ebo = gl.glGenBuffers(1) gl.glBindVertexArray(self.vao) gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vbo) gl.glBufferData(gl.GL_ARRAY_BUFFER, n_verts_bytes, ctypes.c_void_p(0), gl.GL_DYNAMIC_DRAW) # NOTE: Using pointers here won't work # https://stackoverflow.com/questions/67195932/pyopengl-cannot-render-any-vao cumsum = 0 for i, (s, t) in enumerate(zip(self.vert_sizes, self.vert_gl_types)): gl.glVertexAttribPointer(i, s, t, gl.GL_FALSE, self.vert_size * self.verts.element_size(), ctypes.c_void_p(cumsum * self.verts.element_size())) # we use 32 bit float gl.glEnableVertexAttribArray(i) cumsum += s if n_faces_bytes > 0: # Some implementation has no faces, we dangerously ignore ebo here, assuming they will never be used gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.ebo) gl.glBufferData(gl.GL_ELEMENT_ARRAY_BUFFER, n_faces_bytes, ctypes.c_void_p(0), gl.GL_DYNAMIC_DRAW) gl.glBindVertexArray(0) def render_imgui(self): pass class Quad(Mesh): # A shared texture for CUDA (pytorch) and OpenGL # Could be rendererd to screen using blitting or just drawing a quad def __init__(self, H: int = 256, W: int = 256, use_cudagl: bool = True, compose: bool = False, compose_power: float = 1.0): # the texture to blip self.use_cudagl = use_cudagl self.vert_sizes = [3] # only position self.vert_gl_types = [gl.GL_FLOAT] # only position self.render_type = Mesh.RenderType.STRIPS # remove side effects of settings _type self.max_verts, self.max_faces = 0, 0 self.verts = torch.as_tensor([[-1., -1., 0.5], [1., -1., 0.5], [-1., 1., 0.5], [1., 1., 0.5],]) self.update_gl_buffers() self.compile_shaders() self.max_H, self.max_W = H, W self.H, self.W = H, W self.compose = compose self.compose_power = compose_power self.init_texture() @property def n_faces_bytes(self): return 0 def use_gl_program(self, 
program: shaders.ShaderProgram): super().use_gl_program(program) self.uniforms.tex = gl.glGetUniformLocation(program, 'tex') gl.glUseProgram(self.quad_program) # use a different program gl.glUniform1i(self.uniforms.tex, 0) def compile_shaders(self): try: self.quad_program = shaders.compileProgram( shaders.compileShader(load_shader_source('quad.vert'), gl.GL_VERTEX_SHADER), shaders.compileShader(load_shader_source('quad.frag'), gl.GL_FRAGMENT_SHADER) ) except Exception as e: print(str(e).encode('utf-8').decode('unicode_escape')) raise e def resize_textures(self, H: int, W: int): # analogy to update_gl_buffers self.H, self.W = H, W if self.H > self.max_H or self.W > self.max_W: # max got updated self.max_H, self.max_W = max(int(self.H * 1.05), self.max_H), max(int(self.W * 1.05), self.max_W) self.init_texture() def init_texture(self): if hasattr(self, 'cu_tex'): CHECK_CUDART_ERROR(cudart.cudaGraphicsUnregisterResource(self.cu_tex)) if hasattr(self, 'fbo'): gl.glDeleteFramebuffers(1, [self.fbo]) gl.glDeleteTextures(1, [self.tex]) # Init the texture to be blit onto the screen self.tex = gl.glGenTextures(1) gl.glBindTexture(gl.GL_TEXTURE_2D, self.tex) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_RGBA8, self.max_W, self.max_H, 0, gl.GL_RGBA, gl.GL_UNSIGNED_BYTE, ctypes.c_void_p(0)) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST) # Init the framebuffer object if explicit blitting is used (slower than drawing quad) self.fbo = gl.glGenFramebuffers(1) old_fbo = gl.glGetIntegerv(gl.GL_FRAMEBUFFER_BINDING) gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, self.fbo) gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_TEXTURE_2D, self.tex, 0) gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, old_fbo) if self.use_cudagl: if self.compose: # Both reading and writing of this resource is required flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsNone else: flags = cudart.cudaGraphicsRegisterFlags.cudaGraphicsRegisterFlagsWriteDiscard self.cu_tex = CHECK_CUDART_ERROR(cudart.cudaGraphicsGLRegisterImage(self.tex, gl.GL_TEXTURE_2D, flags)) def copy_to_texture(self, image: torch.Tensor, x: int = 0, y: int = 0, w: int = 0, h: int = 0): assert self.use_cudagl, "Need to enable cuda-opengl interop to copy from device to device, check creation of this Quad" w = w or self.W h = h or self.H if image.shape[-1] == 3: image = torch.cat([image, image.new_ones(image.shape[:-1] + (1,)) * 255], dim=-1) # add alpha channel kind = cudart.cudaMemcpyKind.cudaMemcpyDeviceToDevice CHECK_CUDART_ERROR(cudart.cudaGraphicsMapResources(1, self.cu_tex, torch.cuda.current_stream().cuda_stream)) cu_tex_arr = CHECK_CUDART_ERROR(cudart.cudaGraphicsSubResourceGetMappedArray(self.cu_tex, 0, 0)) if self.compose: """ Blit current framebuffer to this texture (self.tex) Read content of this texture into a cuda buffer Perform alpha blending based on the frame's alpha channel Copy the blended image back into the texture (self.tex) """ old = gl.glGetInteger(gl.GL_DRAW_FRAMEBUFFER_BINDING) gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, self.fbo) # read buffer defaults to 0 gl.glBlitFramebuffer(x, y, w, h, x, y, w, h, gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST) # now self.tex contains the content of the already rendered frame gl.glBindFramebuffer(gl.GL_DRAW_FRAMEBUFFER, old) buffer = torch.empty_like(image) CHECK_CUDART_ERROR(cudart.cudaMemcpy2DFromArrayAsync(buffer.data_ptr(), # dst w * 4 * buffer.element_size(), # dpitch cu_tex_arr, # 
src x * 4 * image.element_size(), # wOffset y, # hOffset w * 4 * buffer.element_size(), # width Width of matrix transfer (columns in bytes) h, # height kind, # kind torch.cuda.current_stream().cuda_stream)) # stream # cv2.imwrite('image.png', image.flip(0).detach().cpu().numpy()[..., [2,1,0,3]]) alpha = image[..., -1:] / 255 image[..., :-1] = buffer[..., :-1] * (1 - alpha ** self.compose_power) + image[..., :-1] * alpha # storing float into int image[..., -1:] = buffer[..., -1:] + image[..., -1:] image = image.clip(0, 255) CHECK_CUDART_ERROR(cudart.cudaMemcpy2DToArrayAsync(cu_tex_arr, x * 4 * image.element_size(), y, image.data_ptr(), w * 4 * image.element_size(), # differently sized w * 4 * image.element_size(), # rgba, should do a composition first h, kind, torch.cuda.current_stream().cuda_stream)) CHECK_CUDART_ERROR(cudart.cudaGraphicsUnmapResources(1, self.cu_tex, torch.cuda.current_stream().cuda_stream)) def upload_to_texture(self, ptr: np.ndarray): H, W = ptr.shape[:2] H, W = min(self.H, H), min(self.W, W) gl.glBindTexture(gl.GL_TEXTURE_2D, self.tex) gl.glTexSubImage2D(gl.GL_TEXTURE_2D, 0, 0, 0, W, H, gl.GL_RGBA, gl.GL_UNSIGNED_BYTE, ptr[:H, :W]) # to gpu, might slow down? @property def verts_data(self): # a heavy copy operation verts = self.verts.ravel().detach().cpu().numpy() # MARK: Maybe sync verts = np.asarray(verts, dtype=np.float32, order='C') return verts def render(self, camera: Camera = None): self.draw() # no uploading needed def draw(self, x: int = 0, y: int = 0, w: int = 0, h: int = 0): """ Upload the texture instead of the camera This respects the OpenGL convension of lower left corners """ w = w or self.W h = h or self.H _, _, W, H = gl.glGetIntegerv(gl.GL_VIEWPORT) gl.glViewport(x, y, w, h) gl.glScissor(x, y, w, h) # only render in this small region of the viewport gl.glUseProgram(self.quad_program) # use a different program gl.glActiveTexture(gl.GL_TEXTURE0) gl.glBindTexture(gl.GL_TEXTURE_2D, self.tex) gl.glBindVertexArray(self.vao) gl.glDrawArrays(gl.GL_TRIANGLE_STRIP, 0, len(self.verts)) gl.glBindVertexArray(0) # Some house keepings gl.glViewport(0, 0, W, H) gl.glScissor(0, 0, W, H) def blit(self, x: int = 0, y: int = 0, w: int = 0, h: int = 0): """ This respects the OpenGL convension of lower left corners """ w = w or self.W h = h or self.H old = gl.glGetInteger(gl.GL_READ_FRAMEBUFFER_BINDING) gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, self.fbo) # write buffer defaults to 0 gl.glBlitFramebuffer(x, y, x + w, y + h, # the height is flipped x, y, x + w, y + h, # the height is flipped gl.GL_COLOR_BUFFER_BIT, gl.GL_NEAREST) gl.glBindFramebuffer(gl.GL_READ_FRAMEBUFFER, old) class UQuad(Mesh): """ Responsible for initializing textures with a single value or blitting a texture to a framebuffer (possibly better done with blit instead of quad drawing) Effectively clearing the texture for real, see: https://stackoverflow.com/questions/37335281/is-glcleargl-color-buffer-bit-preferred-before-a-whole-frame-buffer-overwritte """ def __init__(self): self.n_blit_values = 3 self.vert_sizes = [3] # only position self.vert_gl_types = [gl.GL_FLOAT] # only position self.max_verts, self.max_faces = 0, 0 self.verts = torch.as_tensor([[-1., -1., 0.5], [1., -1., 0.5], [-1., 1., 0.5], [1., 1., 0.5],]) self.compile_shaders() self.uniforms = dotdict() # uniform values self.use_gl_programs(self.quad_program) self.update_gl_buffers() @property def n_faces_bytes(self): return 0 @property def verts_data(self): # a heavy copy operation verts = self.verts.ravel().detach().cpu().numpy() # MARK: 
Maybe sync verts = np.asarray(verts, dtype=np.float32, order='C') return verts def use_gl_programs(self, program: shaders.ShaderProgram): for i in range(self.n_blit_values): self.uniforms[f'value{i}'] = gl.glGetUniformLocation(program, f'value{i}') for i in range(self.n_blit_values): self.uniforms[f'use_tex{i}'] = gl.glGetUniformLocation(program, f'use_tex{i}') gl.glUseProgram(self.program) # use a different program for i in range(self.n_blit_values): self.uniforms[f'tex{i}'] = gl.glGetUniformLocation(program, f'tex{i}') gl.glUniform1i(self.uniforms[f'tex{i}'], i) def upload_gl_uniforms(self, values: List[List[float]], use_texs: List[bool]): for i, v in enumerate(values): v = vec4(v) # HACK: Hold the reference for this upload gl.glUniform4fv(self.uniforms[f'value{i}'], 1, glm.value_ptr(v)) # as float array for i, v in enumerate(use_texs): gl.glUniform1i(self.uniforms[f'use_tex{i}'], v) def compile_shaders(self): try: self.quad_program = shaders.compileProgram( shaders.compileShader(load_shader_source('uquad.vert'), gl.GL_VERTEX_SHADER), shaders.compileShader(load_shader_source('uquad.frag'), gl.GL_FRAGMENT_SHADER) ) except Exception as e: print(str(e).encode('utf-8').decode('unicode_escape')) raise e def draw(self, values: List[List[float]] = [], use_texs=[]): """ This function will render 'value' to the currently bound framebuffer, up to six outputs """ old_prog = gl.glGetIntegerv(gl.GL_CURRENT_PROGRAM) old_vao = gl.glGetIntegerv(gl.GL_VERTEX_ARRAY_BINDING) gl.glUseProgram(self.quad_program) self.upload_gl_uniforms(values, use_texs) # should be a noop # Prepare to render to textures gl.glBindVertexArray(self.vao) gl.glDrawArrays(gl.GL_TRIANGLE_STRIP, 0, len(self.verts)) # number of vertices gl.glBindVertexArray(old_vao) gl.glUseProgram(old_prog) class DQuad(UQuad): def compile_shaders(self): try: self.quad_program = shaders.compileProgram( shaders.compileShader(load_shader_source('dquad.vert'), gl.GL_VERTEX_SHADER), shaders.compileShader(load_shader_source('dquad.frag'), gl.GL_FRAGMENT_SHADER) ) except Exception as e: print(str(e).encode('utf-8').decode('unicode_escape')) raise e def draw(self, values: List[List[float]] = [], use_texs=[]): old_function = gl.glGetIntegerv(gl.GL_DEPTH_FUNC) gl.glDepthFunc(gl.GL_ALWAYS) super().draw(values, use_texs) gl.glDepthFunc(old_function) def hardware_rendering_framebuffer(H: int, W: int, gl_tex_dtype=gl.GL_RGBA16F): # Prepare for write frame buffers color_buffer = gl.glGenTextures(1) depth_upper = gl.glGenTextures(1) depth_lower = gl.glGenTextures(1) depth_attach = gl.glGenTextures(1) fbo = gl.glGenFramebuffers(1) # generate 1 framebuffer, storereference in fb # Init the texture (call the resizing function), will simply allocate empty memory # The internal format describes how the texture shall be stored in the GPU. The format describes how the format of your pixel data in client memory (together with the type parameter). 
gl.glBindTexture(gl.GL_TEXTURE_2D, color_buffer) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl_tex_dtype, W, H, 0, gl.GL_RGBA, gl.GL_FLOAT, ctypes.c_void_p(0)) # 16 * 4 gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST) gl.glBindTexture(gl.GL_TEXTURE_2D, depth_upper) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_R32F, W, H, 0, gl.GL_RED, gl.GL_FLOAT, ctypes.c_void_p(0)) # 32 gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST) gl.glBindTexture(gl.GL_TEXTURE_2D, depth_lower) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_R32F, W, H, 0, gl.GL_RED, gl.GL_FLOAT, ctypes.c_void_p(0)) # 32 gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST) gl.glBindTexture(gl.GL_TEXTURE_2D, depth_attach) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_DEPTH_COMPONENT24, W, H, 0, gl.GL_DEPTH_COMPONENT, gl.GL_UNSIGNED_INT, ctypes.c_void_p(0)) # 32 gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST) # Bind texture to fbo gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, fbo) gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_TEXTURE_2D, color_buffer, 0) # location 0 gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT1, gl.GL_TEXTURE_2D, depth_upper, 0) # location 1 gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT2, gl.GL_TEXTURE_2D, depth_lower, 0) # location 1 gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_DEPTH_ATTACHMENT, gl.GL_TEXTURE_2D, depth_attach, 0) gl.glDrawBuffers(3, [gl.GL_COLOR_ATTACHMENT0, gl.GL_COLOR_ATTACHMENT1, gl.GL_COLOR_ATTACHMENT2]) # Check framebuffer status if gl.glCheckFramebufferStatus(gl.GL_FRAMEBUFFER) != gl.GL_FRAMEBUFFER_COMPLETE: log(red('Framebuffer not complete, exiting...')) raise RuntimeError('Incomplete framebuffer') # Restore the original state gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0) return color_buffer, depth_upper, depth_lower, depth_attach, fbo def hareward_peeling_framebuffer(H: int, W: int): # Prepare for write frame buffers index_buffer = gl.glGenTextures(1) depth_lower = gl.glGenTextures(1) depth_attach = gl.glGenTextures(1) fbo = gl.glGenFramebuffers(1) # generate 1 framebuffer, storereference in fb # Init the texture (call the resizing function), will simply allocate empty memory # The internal format describes how the texture shall be stored in the GPU. The format describes how the format of your pixel data in client memory (together with the type parameter). 
gl.glBindTexture(gl.GL_TEXTURE_2D, index_buffer) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_R32I, W, H, 0, gl.GL_RED_INTEGER, gl.GL_INT, ctypes.c_void_p(0)) # 32 gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST) gl.glBindTexture(gl.GL_TEXTURE_2D, depth_lower) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_R32F, W, H, 0, gl.GL_RED, gl.GL_FLOAT, ctypes.c_void_p(0)) # 32 gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST) gl.glBindTexture(gl.GL_TEXTURE_2D, depth_attach) gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_DEPTH_COMPONENT24, W, H, 0, gl.GL_DEPTH_COMPONENT, gl.GL_FLOAT, ctypes.c_void_p(0)) # 32 gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST) # Bind texture to fbo gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, fbo) gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT0, gl.GL_TEXTURE_2D, index_buffer, 0) # location 1 gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_COLOR_ATTACHMENT1, gl.GL_TEXTURE_2D, depth_lower, 0) # location 1 gl.glFramebufferTexture2D(gl.GL_FRAMEBUFFER, gl.GL_DEPTH_ATTACHMENT, gl.GL_TEXTURE_2D, depth_attach, 0) gl.glDrawBuffers(2, [gl.GL_COLOR_ATTACHMENT0, gl.GL_COLOR_ATTACHMENT1]) # Check framebuffer status if gl.glCheckFramebufferStatus(gl.GL_FRAMEBUFFER) != gl.GL_FRAMEBUFFER_COMPLETE: log(red('Framebuffer not complete, exiting...')) raise RuntimeError('Incomplete framebuffer') # Restore the original state gl.glBindFramebuffer(gl.GL_FRAMEBUFFER, 0) return index_buffer, depth_lower, depth_attach, fbo class Gaussian(Mesh): def __init__(self, filename: str = 'assets/meshes/zju3dv.npz', gaussian_cfg: dotdict = dotdict(), quad_cfg: dotdict = dotdict(), render_depth: bool = False, # show depth or show color dpt_cm: str = 'linear', H: int = 1024, W: int = 1024, **kwargs, ): # Import Gaussian Model # Housekeeping super().__init__(**kwargs) self.name = split(filename)[-1] # Init Gaussian related models, for now only the first gaussian model is supported if filename.endswith('.npz') or filename.endswith('.pt') or filename.endswith('.pth'): # Load from GaussianTSampler pretrained, _ = load_pretrained(filename) # loaded model and updated path (maybe) pretrained = pretrained.model state_dict = dotdict() for k, v in pretrained.items(): if k.startswith('sampler.pcds.0'): state_dict[k.replace('sampler.pcds.0.', '')] = v # Load the parameters into the gaussian model self.gaussian_model: GaussianModel = call_from_cfg(GaussianModel, gaussian_cfg) # init empty gaussian model self.gaussian_model.load_state_dict(state_dict) # load the first gaussian model self.gaussian_model.cuda() # move the parameters to GPU elif filename.endswith('.ply'): # Load raw GaussianModel pass else: raise NotImplementedError # Init rendering quad self.quad: Quad = call_from_cfg(Quad, quad_cfg, H=H, W=W) # Other configurations self.render_depth = render_depth self.dpt_cm = dpt_cm # Disabling initialization def load_from_file(self, *args, **kwargs): pass def load_from_data(self, *args, **kwargs): pass def compile_shaders(self): pass def update_gl_buffers(self): pass def resize_textures(self, H: int, W: int): self.quad.resize_textures(H, W) # The actual rendering function @torch.no_grad() def render(self, camera: Camera): # Perform actual gaussian rendering batch = to_cuda(camera.to_batch()) rgb, 
acc, dpt = self.gaussian_model.render(batch) if self.render_depth:
rgba = torch.cat([depth_curve_fn(dpt, cm=self.dpt_cm), acc], dim=-1) # H, W, 4
3
2023-10-17 04:48:46+00:00
16k
0xbitches/sd-webui-lcm
scripts/main.py
[ { "identifier": "LCMScheduler", "path": "lcm/lcm_scheduler.py", "snippet": "class LCMScheduler(SchedulerMixin, ConfigMixin):\n \"\"\"\n `LCMScheduler` extends the denoising procedure introduced in denoising diffusion probabilistic models (DDPMs) with\n non-Markovian guidance.\n\n This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic\n methods the library implements for all schedulers such as loading and saving.\n\n Args:\n num_train_timesteps (`int`, defaults to 1000):\n The number of diffusion steps to train the model.\n beta_start (`float`, defaults to 0.0001):\n The starting `beta` value of inference.\n beta_end (`float`, defaults to 0.02):\n The final `beta` value.\n beta_schedule (`str`, defaults to `\"linear\"`):\n The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from\n `linear`, `scaled_linear`, or `squaredcos_cap_v2`.\n trained_betas (`np.ndarray`, *optional*):\n Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`.\n clip_sample (`bool`, defaults to `True`):\n Clip the predicted sample for numerical stability.\n clip_sample_range (`float`, defaults to 1.0):\n The maximum magnitude for sample clipping. Valid only when `clip_sample=True`.\n set_alpha_to_one (`bool`, defaults to `True`):\n Each diffusion step uses the alphas product value at that step and at the previous one. For the final step\n there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`,\n otherwise it uses the alpha value at step 0.\n steps_offset (`int`, defaults to 0):\n An offset added to the inference steps. You can use a combination of `offset=1` and\n `set_alpha_to_one=False` to make the last step use step 0 for the previous alpha product like in Stable\n Diffusion.\n prediction_type (`str`, defaults to `epsilon`, *optional*):\n Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process),\n `sample` (directly predicts the noisy sample`) or `v_prediction` (see section 2.4 of [Imagen\n Video](https://imagen.research.google/video/paper.pdf) paper).\n thresholding (`bool`, defaults to `False`):\n Whether to use the \"dynamic thresholding\" method. This is unsuitable for latent-space diffusion models such\n as Stable Diffusion.\n dynamic_thresholding_ratio (`float`, defaults to 0.995):\n The ratio for the dynamic thresholding method. Valid only when `thresholding=True`.\n sample_max_value (`float`, defaults to 1.0):\n The threshold value for dynamic thresholding. Valid only when `thresholding=True`.\n timestep_spacing (`str`, defaults to `\"leading\"`):\n The way the timesteps should be scaled. Refer to Table 2 of the [Common Diffusion Noise Schedules and\n Sample Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information.\n rescale_betas_zero_snr (`bool`, defaults to `False`):\n Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and\n dark samples instead of limiting it to samples with medium brightness. 
Loosely related to\n [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506).\n \"\"\"\n\n # _compatibles = [e.name for e in KarrasDiffusionSchedulers]\n order = 1\n\n @register_to_config\n def __init__(\n self,\n num_train_timesteps: int = 1000,\n beta_start: float = 0.0001,\n beta_end: float = 0.02,\n beta_schedule: str = \"linear\",\n trained_betas: Optional[Union[np.ndarray, List[float]]] = None,\n clip_sample: bool = True,\n set_alpha_to_one: bool = True,\n steps_offset: int = 0,\n prediction_type: str = \"epsilon\",\n thresholding: bool = False,\n dynamic_thresholding_ratio: float = 0.995,\n clip_sample_range: float = 1.0,\n sample_max_value: float = 1.0,\n timestep_spacing: str = \"leading\",\n rescale_betas_zero_snr: bool = False,\n ):\n if trained_betas is not None:\n self.betas = torch.tensor(trained_betas, dtype=torch.float32)\n elif beta_schedule == \"linear\":\n self.betas = torch.linspace(\n beta_start, beta_end, num_train_timesteps, dtype=torch.float32)\n elif beta_schedule == \"scaled_linear\":\n # this schedule is very specific to the latent diffusion model.\n self.betas = (\n torch.linspace(beta_start**0.5, beta_end**0.5,\n num_train_timesteps, dtype=torch.float32) ** 2\n )\n elif beta_schedule == \"squaredcos_cap_v2\":\n # Glide cosine schedule\n self.betas = betas_for_alpha_bar(num_train_timesteps)\n else:\n raise NotImplementedError(\n f\"{beta_schedule} does is not implemented for {self.__class__}\")\n\n # Rescale for zero SNR\n if rescale_betas_zero_snr:\n self.betas = rescale_zero_terminal_snr(self.betas)\n\n self.alphas = 1.0 - self.betas\n self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)\n\n # At every step in ddim, we are looking into the previous alphas_cumprod\n # For the final step, there is no previous alphas_cumprod because we are already at 0\n # `set_alpha_to_one` decides whether we set this parameter simply to one or\n # whether we use the final alpha of the \"non-previous\" one.\n self.final_alpha_cumprod = torch.tensor(\n 1.0) if set_alpha_to_one else self.alphas_cumprod[0]\n\n # standard deviation of the initial noise distribution\n self.init_noise_sigma = 1.0\n\n # setable values\n self.num_inference_steps = None\n self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[\n ::-1].copy().astype(np.int64))\n\n def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:\n \"\"\"\n Ensures interchangeability with schedulers that need to scale the denoising model input depending on the\n current timestep.\n\n Args:\n sample (`torch.FloatTensor`):\n The input sample.\n timestep (`int`, *optional*):\n The current timestep in the diffusion chain.\n\n Returns:\n `torch.FloatTensor`:\n A scaled input sample.\n \"\"\"\n return sample\n\n def _get_variance(self, timestep, prev_timestep):\n alpha_prod_t = self.alphas_cumprod[timestep]\n alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod\n beta_prod_t = 1 - alpha_prod_t\n beta_prod_t_prev = 1 - alpha_prod_t_prev\n\n variance = (beta_prod_t_prev / beta_prod_t) * \\\n (1 - alpha_prod_t / alpha_prod_t_prev)\n\n return variance\n\n # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample\n def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor:\n \"\"\"\n \"Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in 
xt0 (the\n prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by\n s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing\n pixels from saturation at each step. We find that dynamic thresholding results in significantly better\n photorealism as well as better image-text alignment, especially when using very large guidance weights.\"\n\n https://arxiv.org/abs/2205.11487\n \"\"\"\n dtype = sample.dtype\n batch_size, channels, height, width = sample.shape\n\n if dtype not in (torch.float32, torch.float64):\n # upcast for quantile calculation, and clamp not implemented for cpu half\n sample = sample.float()\n\n # Flatten sample for doing quantile calculation along each image\n sample = sample.reshape(batch_size, channels * height * width)\n\n abs_sample = sample.abs() # \"a certain percentile absolute pixel value\"\n\n s = torch.quantile(\n abs_sample, self.config.dynamic_thresholding_ratio, dim=1)\n s = torch.clamp(\n s, min=1, max=self.config.sample_max_value\n ) # When clamped to min=1, equivalent to standard clipping to [-1, 1]\n\n # (batch_size, 1) because clamp will broadcast along dim=0\n s = s.unsqueeze(1)\n # \"we threshold xt0 to the range [-s, s] and then divide by s\"\n sample = torch.clamp(sample, -s, s) / s\n\n sample = sample.reshape(batch_size, channels, height, width)\n sample = sample.to(dtype)\n\n return sample\n\n def set_timesteps(self, num_inference_steps: int, original_inference_steps: int, device: Union[str, torch.device] = None):\n \"\"\"\n Sets the discrete timesteps used for the diffusion chain (to be run before inference).\n\n Args:\n num_inference_steps (`int`):\n The number of diffusion steps used when generating samples with a pre-trained model.\n \"\"\"\n\n if num_inference_steps > self.config.num_train_timesteps:\n raise ValueError(\n f\"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:\"\n f\" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle\"\n f\" maximal {self.config.num_train_timesteps} timesteps.\"\n )\n\n self.num_inference_steps = num_inference_steps\n\n # LCM Timesteps Setting: # Linear Spacing\n c = self.config.num_train_timesteps // original_inference_steps\n lcm_origin_timesteps = np.asarray(\n list(range(1, original_inference_steps + 1))) * c - 1 # LCM Training Steps Schedule\n skipping_step = len(lcm_origin_timesteps) // num_inference_steps\n # LCM Inference Steps Schedule\n timesteps = lcm_origin_timesteps[::-\n skipping_step][:num_inference_steps]\n\n self.timesteps = torch.from_numpy(timesteps.copy()).to(device)\n\n def get_scalings_for_boundary_condition_discrete(self, t):\n self.sigma_data = 0.5 # Default: 0.5\n\n # By dividing 0.1: This is almost a delta function at t=0.\n c_skip = self.sigma_data**2 / (\n (t / 0.1) ** 2 + self.sigma_data**2\n )\n c_out = ((t / 0.1) / ((t / 0.1) ** 2 + self.sigma_data**2) ** 0.5)\n return c_skip, c_out\n\n def step(\n self,\n model_output: torch.FloatTensor,\n timeindex: int,\n timestep: int,\n sample: torch.FloatTensor,\n eta: float = 0.0,\n use_clipped_model_output: bool = False,\n generator=None,\n variance_noise: Optional[torch.FloatTensor] = None,\n return_dict: bool = True,\n ) -> Union[LCMSchedulerOutput, Tuple]:\n \"\"\"\n Predict the sample from the previous timestep by reversing the SDE. 
This function propagates the diffusion\n process from the learned model outputs (most often the predicted noise).\n\n Args:\n model_output (`torch.FloatTensor`):\n The direct output from learned diffusion model.\n timestep (`float`):\n The current discrete timestep in the diffusion chain.\n sample (`torch.FloatTensor`):\n A current instance of a sample created by the diffusion process.\n eta (`float`):\n The weight of noise for added noise in diffusion step.\n use_clipped_model_output (`bool`, defaults to `False`):\n If `True`, computes \"corrected\" `model_output` from the clipped predicted original sample. Necessary\n because predicted original sample is clipped to [-1, 1] when `self.config.clip_sample` is `True`. If no\n clipping has happened, \"corrected\" `model_output` would coincide with the one provided as input and\n `use_clipped_model_output` has no effect.\n generator (`torch.Generator`, *optional*):\n A random number generator.\n variance_noise (`torch.FloatTensor`):\n Alternative to generating noise with `generator` by directly providing the noise for the variance\n itself. Useful for methods such as [`CycleDiffusion`].\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] or `tuple`.\n\n Returns:\n [`~schedulers.scheduling_utils.LCMSchedulerOutput`] or `tuple`:\n If return_dict is `True`, [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] is returned, otherwise a\n tuple is returned where the first element is the sample tensor.\n\n \"\"\"\n if self.num_inference_steps is None:\n raise ValueError(\n \"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler\"\n )\n\n # 1. get previous step value\n prev_timeindex = timeindex + 1\n if prev_timeindex < len(self.timesteps):\n prev_timestep = self.timesteps[prev_timeindex]\n else:\n prev_timestep = timestep\n\n # 2. compute alphas, betas\n alpha_prod_t = self.alphas_cumprod[timestep]\n alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod\n\n beta_prod_t = 1 - alpha_prod_t\n beta_prod_t_prev = 1 - alpha_prod_t_prev\n\n # 3. Get scalings for boundary conditions\n c_skip, c_out = self.get_scalings_for_boundary_condition_discrete(\n timestep)\n\n # 4. Different Parameterization:\n parameterization = self.config.prediction_type\n\n if parameterization == \"epsilon\": # noise-prediction\n pred_x0 = (sample - beta_prod_t.sqrt() *\n model_output) / alpha_prod_t.sqrt()\n\n elif parameterization == \"sample\": # x-prediction\n pred_x0 = model_output\n\n elif parameterization == \"v_prediction\": # v-prediction\n pred_x0 = alpha_prod_t.sqrt() * sample - beta_prod_t.sqrt() * model_output\n\n # 4. Denoise model output using boundary conditions\n denoised = c_out * pred_x0 + c_skip * sample\n\n # 5. 
Sample z ~ N(0, I), For MultiStep Inference\n # Noise is not used for one-step sampling.\n if len(self.timesteps) > 1:\n noise = torch.randn(model_output.shape).to(model_output.device)\n prev_sample = alpha_prod_t_prev.sqrt() * denoised + beta_prod_t_prev.sqrt() * noise\n else:\n prev_sample = denoised\n\n if not return_dict:\n return (prev_sample, denoised)\n\n return LCMSchedulerOutput(prev_sample=prev_sample, denoised=denoised)\n\n # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise\n\n def add_noise(\n self,\n original_samples: torch.FloatTensor,\n noise: torch.FloatTensor,\n timesteps: torch.IntTensor,\n ) -> torch.FloatTensor:\n # Make sure alphas_cumprod and timestep have same device and dtype as original_samples\n alphas_cumprod = self.alphas_cumprod.to(\n device=original_samples.device, dtype=original_samples.dtype)\n timesteps = timesteps.to(original_samples.device)\n\n sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5\n sqrt_alpha_prod = sqrt_alpha_prod.flatten()\n while len(sqrt_alpha_prod.shape) < len(original_samples.shape):\n sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)\n\n sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5\n sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()\n while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):\n sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)\n\n noisy_samples = sqrt_alpha_prod * original_samples + \\\n sqrt_one_minus_alpha_prod * noise\n return noisy_samples\n\n # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.get_velocity\n def get_velocity(\n self, sample: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.IntTensor\n ) -> torch.FloatTensor:\n # Make sure alphas_cumprod and timestep have same device and dtype as sample\n alphas_cumprod = self.alphas_cumprod.to(\n device=sample.device, dtype=sample.dtype)\n timesteps = timesteps.to(sample.device)\n\n sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5\n sqrt_alpha_prod = sqrt_alpha_prod.flatten()\n while len(sqrt_alpha_prod.shape) < len(sample.shape):\n sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)\n\n sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5\n sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()\n while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape):\n sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)\n\n velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample\n return velocity\n\n def __len__(self):\n return self.config.num_train_timesteps" }, { "identifier": "LatentConsistencyModelPipeline", "path": "lcm/lcm_pipeline.py", "snippet": "class LatentConsistencyModelPipeline(DiffusionPipeline):\n def __init__(\n self,\n vae: AutoencoderKL,\n text_encoder: CLIPTextModel,\n tokenizer: CLIPTokenizer,\n unet: UNet2DConditionModel,\n scheduler: None,\n safety_checker: None,\n feature_extractor: CLIPImageProcessor\n ):\n super().__init__()\n\n self.register_modules(\n vae=vae,\n text_encoder=text_encoder,\n tokenizer=tokenizer,\n unet=unet,\n scheduler=scheduler,\n safety_checker=safety_checker,\n feature_extractor=feature_extractor,\n )\n self.vae_scale_factor = 2 ** (\n len(self.vae.config.block_out_channels) - 1)\n self.image_processor = VaeImageProcessor(\n vae_scale_factor=self.vae_scale_factor)\n\n def _encode_prompt(\n self,\n prompt,\n device,\n num_images_per_prompt,\n prompt_embeds: None,\n ):\n r\"\"\"\n Encodes the prompt into text encoder hidden states.\n\n Args:\n prompt (`str` 
or `List[str]`, *optional*):\n prompt to be encoded\n device: (`torch.device`):\n torch device\n num_images_per_prompt (`int`):\n number of images that should be generated per prompt\n prompt_embeds (`torch.FloatTensor`, *optional*):\n Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not\n provided, text embeddings will be generated from `prompt` input argument.\n \"\"\"\n\n if prompt is not None and isinstance(prompt, str):\n batch_size = 1\n elif prompt is not None and isinstance(prompt, list):\n batch_size = len(prompt)\n else:\n batch_size = prompt_embeds.shape[0]\n\n if prompt_embeds is None:\n\n text_inputs = self.tokenizer(\n prompt,\n padding=\"max_length\",\n max_length=self.tokenizer.model_max_length,\n truncation=True,\n return_tensors=\"pt\",\n )\n text_input_ids = text_inputs.input_ids\n untruncated_ids = self.tokenizer(\n prompt, padding=\"longest\", return_tensors=\"pt\").input_ids\n\n if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(\n text_input_ids, untruncated_ids\n ):\n removed_text = self.tokenizer.batch_decode(\n untruncated_ids[:, self.tokenizer.model_max_length - 1: -1]\n )\n logger.warning(\n \"The following part of your input was truncated because CLIP can only handle sequences up to\"\n f\" {self.tokenizer.model_max_length} tokens: {removed_text}\"\n )\n\n if hasattr(self.text_encoder.config, \"use_attention_mask\") and self.text_encoder.config.use_attention_mask:\n attention_mask = text_inputs.attention_mask.to(device)\n else:\n attention_mask = None\n\n prompt_embeds = self.text_encoder(\n text_input_ids.to(device),\n attention_mask=attention_mask,\n )\n prompt_embeds = prompt_embeds[0]\n\n if self.text_encoder is not None:\n prompt_embeds_dtype = self.text_encoder.dtype\n elif self.unet is not None:\n prompt_embeds_dtype = self.unet.dtype\n else:\n prompt_embeds_dtype = prompt_embeds.dtype\n\n prompt_embeds = prompt_embeds.to(\n dtype=prompt_embeds_dtype, device=device)\n\n bs_embed, seq_len, _ = prompt_embeds.shape\n # duplicate text embeddings for each generation per prompt, using mps friendly method\n prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)\n prompt_embeds = prompt_embeds.view(\n bs_embed * num_images_per_prompt, seq_len, -1)\n\n # Don't need to get uncond prompt embedding because of LCM Guided Distillation\n return prompt_embeds\n\n # ¯\\_(ツ)_/¯\n def run_safety_checker(self, image, device, dtype):\n return image, None\n \n def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, latents=None):\n shape = (batch_size, num_channels_latents, height //\n self.vae_scale_factor, width // self.vae_scale_factor)\n if latents is None:\n latents = torch.randn(shape, dtype=dtype).to(device)\n else:\n latents = latents.to(device)\n # scale the initial noise by the standard deviation required by the scheduler\n latents = latents * self.scheduler.init_noise_sigma\n return latents\n\n def get_w_embedding(self, w, embedding_dim=512, dtype=torch.float32):\n \"\"\"\n see https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298\n Args:\n timesteps: torch.Tensor: generate embedding vectors at these timesteps\n embedding_dim: int: dimension of the embeddings to generate\n dtype: data type of the generated embeddings\n\n Returns:\n embedding vectors with shape `(len(timesteps), embedding_dim)`\n \"\"\"\n assert len(w.shape) == 1\n w = w * 1000.\n\n half_dim = embedding_dim // 2\n emb = 
torch.log(torch.tensor(10000.)) / (half_dim - 1)\n emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)\n emb = w.to(dtype)[:, None] * emb[None, :]\n emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)\n if embedding_dim % 2 == 1: # zero pad\n emb = torch.nn.functional.pad(emb, (0, 1))\n assert emb.shape == (w.shape[0], embedding_dim)\n return emb\n\n @torch.no_grad()\n def __call__(\n self,\n prompt: Union[str, List[str]] = None,\n height: Optional[int] = 768,\n width: Optional[int] = 768,\n guidance_scale: float = 7.5,\n num_images_per_prompt: Optional[int] = 1,\n latents: Optional[torch.FloatTensor] = None,\n num_inference_steps: int = 4,\n original_inference_steps: int = 50,\n prompt_embeds: Optional[torch.FloatTensor] = None,\n output_type: Optional[str] = \"pil\",\n return_dict: bool = True,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n device: Optional[Union[str, torch.device]] = None,\n ):\n\n # 0. Default height and width to unet\n height = height or self.unet.config.sample_size * self.vae_scale_factor\n width = width or self.unet.config.sample_size * self.vae_scale_factor\n\n # 2. Define call parameters\n if prompt is not None and isinstance(prompt, str):\n batch_size = 1\n elif prompt is not None and isinstance(prompt, list):\n batch_size = len(prompt)\n else:\n batch_size = prompt_embeds.shape[0]\n\n # do_classifier_free_guidance = guidance_scale > 0.0 # In LCM Implementation: cfg_noise = noise_cond + cfg_scale * (noise_cond - noise_uncond) , (cfg_scale > 0.0 using CFG)\n\n # 3. Encode input prompt\n prompt_embeds = self._encode_prompt(\n prompt,\n device,\n num_images_per_prompt,\n prompt_embeds=prompt_embeds,\n )\n\n # 4. Prepare timesteps\n self.scheduler.set_timesteps(num_inference_steps, original_inference_steps)\n timesteps = self.scheduler.timesteps\n\n # 5. Prepare latent variable\n num_channels_latents = self.unet.config.in_channels\n latents = self.prepare_latents(\n batch_size * num_images_per_prompt,\n num_channels_latents,\n height,\n width,\n prompt_embeds.dtype,\n device,\n latents,\n )\n bs = batch_size * num_images_per_prompt\n\n # 6. Get Guidance Scale Embedding\n w = torch.tensor(guidance_scale).repeat(bs)\n w_embedding = self.get_w_embedding(w, embedding_dim=256).to(\n device=device, dtype=latents.dtype)\n\n # 7. 
LCM MultiStep Sampling Loop:\n with self.progress_bar(total=num_inference_steps) as progress_bar:\n for i, t in enumerate(timesteps):\n\n ts = torch.full((bs,), t, device=device, dtype=torch.long)\n latents = latents.to(prompt_embeds.dtype)\n\n # model prediction (v-prediction, eps, x)\n model_pred = self.unet(\n latents,\n ts,\n timestep_cond=w_embedding,\n encoder_hidden_states=prompt_embeds,\n cross_attention_kwargs=cross_attention_kwargs,\n return_dict=False)[0]\n\n # compute the previous noisy sample x_t -> x_t-1\n latents, denoised = self.scheduler.step(\n model_pred, i, t, latents, return_dict=False)\n\n # # call the callback, if provided\n # if i == len(timesteps) - 1:\n progress_bar.update()\n\n denoised = denoised.to(prompt_embeds.dtype)\n if not output_type == \"latent\":\n image = self.vae.decode(\n denoised / self.vae.config.scaling_factor, return_dict=False)[0]\n image, has_nsfw_concept = self.run_safety_checker(\n image, device, prompt_embeds.dtype)\n else:\n image = denoised\n has_nsfw_concept = None\n\n if has_nsfw_concept is None:\n do_denormalize = [True] * image.shape[0]\n else:\n do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]\n\n image = self.image_processor.postprocess(\n image, output_type=output_type, do_denormalize=do_denormalize)\n\n if not return_dict:\n return (image, has_nsfw_concept)\n\n return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)" }, { "identifier": "LatentConsistencyModelImg2ImgPipeline", "path": "lcm/lcm_i2i_pipeline.py", "snippet": "class LatentConsistencyModelImg2ImgPipeline(DiffusionPipeline):\n _optional_components = [\"scheduler\"]\n\n def __init__(\n self,\n vae: AutoencoderKL,\n text_encoder: CLIPTextModel,\n tokenizer: CLIPTokenizer,\n unet: UNet2DConditionModel,\n scheduler: \"LCMSchedulerWithTimestamp\",\n safety_checker: StableDiffusionSafetyChecker,\n feature_extractor: CLIPImageProcessor,\n requires_safety_checker: bool = False,\n ):\n super().__init__()\n\n scheduler = (\n scheduler\n if scheduler is not None\n else LCMSchedulerWithTimestamp(\n beta_start=0.00085, beta_end=0.0120, beta_schedule=\"scaled_linear\", prediction_type=\"epsilon\"\n )\n )\n\n self.register_modules(\n vae=vae,\n text_encoder=text_encoder,\n tokenizer=tokenizer,\n unet=unet,\n scheduler=scheduler,\n safety_checker=safety_checker,\n feature_extractor=feature_extractor,\n )\n self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)\n self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)\n\n def _encode_prompt(\n self,\n prompt,\n device,\n num_images_per_prompt,\n prompt_embeds: None,\n ):\n r\"\"\"\n Encodes the prompt into text encoder hidden states.\n Args:\n prompt (`str` or `List[str]`, *optional*):\n prompt to be encoded\n device: (`torch.device`):\n torch device\n num_images_per_prompt (`int`):\n number of images that should be generated per prompt\n prompt_embeds (`torch.FloatTensor`, *optional*):\n Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. 
If not\n provided, text embeddings will be generated from `prompt` input argument.\n \"\"\"\n\n if prompt is not None and isinstance(prompt, str):\n pass\n elif prompt is not None and isinstance(prompt, list):\n len(prompt)\n else:\n prompt_embeds.shape[0]\n\n if prompt_embeds is None:\n text_inputs = self.tokenizer(\n prompt,\n padding=\"max_length\",\n max_length=self.tokenizer.model_max_length,\n truncation=True,\n return_tensors=\"pt\",\n )\n text_input_ids = text_inputs.input_ids\n untruncated_ids = self.tokenizer(prompt, padding=\"longest\", return_tensors=\"pt\").input_ids\n\n if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(\n text_input_ids, untruncated_ids\n ):\n removed_text = self.tokenizer.batch_decode(\n untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]\n )\n logger.warning(\n \"The following part of your input was truncated because CLIP can only handle sequences up to\"\n f\" {self.tokenizer.model_max_length} tokens: {removed_text}\"\n )\n\n if hasattr(self.text_encoder.config, \"use_attention_mask\") and self.text_encoder.config.use_attention_mask:\n attention_mask = text_inputs.attention_mask.to(device)\n else:\n attention_mask = None\n\n prompt_embeds = self.text_encoder(\n text_input_ids.to(device),\n attention_mask=attention_mask,\n )\n prompt_embeds = prompt_embeds[0]\n\n if self.text_encoder is not None:\n prompt_embeds_dtype = self.text_encoder.dtype\n elif self.unet is not None:\n prompt_embeds_dtype = self.unet.dtype\n else:\n prompt_embeds_dtype = prompt_embeds.dtype\n\n prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)\n\n bs_embed, seq_len, _ = prompt_embeds.shape\n # duplicate text embeddings for each generation per prompt, using mps friendly method\n prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)\n prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)\n\n # Don't need to get uncond prompt embedding because of LCM Guided Distillation\n return prompt_embeds\n\n # ¯\\_(ツ)_/¯\n def run_safety_checker(self, image, device, dtype):\n return image, None\n\n def prepare_latents(self, image, timestep, batch_size, num_channels_latents, height, width, dtype, device, latents=None, generator=None):\n shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)\n\n if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):\n raise ValueError(\n f\"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}\"\n )\n\n image = image.to(device=device, dtype=dtype)\n\n # batch_size = batch_size * num_images_per_prompt\n\n if image.shape[1] == 4:\n init_latents = image\n\n else:\n if isinstance(generator, list) and len(generator) != batch_size:\n raise ValueError(\n f\"You have passed a list of generators of length {len(generator)}, but requested an effective batch\"\n f\" size of {batch_size}. 
Make sure the batch size matches the length of the generators.\"\n )\n\n elif isinstance(generator, list):\n init_latents = [\n self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)\n ]\n init_latents = torch.cat(init_latents, dim=0)\n else:\n init_latents = self.vae.encode(image).latent_dist.sample(generator)\n\n init_latents = self.vae.config.scaling_factor * init_latents\n\n if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:\n # expand init_latents for batch_size\n deprecation_message = (\n f\"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial\"\n \" images (`image`). Initial images are now duplicating to match the number of text prompts. Note\"\n \" that this behavior is deprecated and will be removed in a version 1.0.0. Please make sure to update\"\n \" your script to pass as many initial images as text prompts to suppress this warning.\"\n )\n # deprecate(\"len(prompt) != len(image)\", \"1.0.0\", deprecation_message, standard_warn=False)\n additional_image_per_prompt = batch_size // init_latents.shape[0]\n init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0)\n elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:\n raise ValueError(\n f\"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts.\"\n )\n else:\n init_latents = torch.cat([init_latents], dim=0)\n\n shape = init_latents.shape\n noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)\n\n # get latents\n init_latents = self.scheduler.add_noise(init_latents, noise, timestep)\n latents = init_latents\n\n return latents\n\n if latents is None:\n latents = torch.randn(shape, dtype=dtype).to(device)\n else:\n latents = latents.to(device)\n # scale the initial noise by the standard deviation required by the scheduler\n latents = latents * self.scheduler.init_noise_sigma\n return latents\n\n def get_w_embedding(self, w, embedding_dim=512, dtype=torch.float32):\n \"\"\"\n see https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298\n Args:\n timesteps: torch.Tensor: generate embedding vectors at these timesteps\n embedding_dim: int: dimension of the embeddings to generate\n dtype: data type of the generated embeddings\n Returns:\n embedding vectors with shape `(len(timesteps), embedding_dim)`\n \"\"\"\n assert len(w.shape) == 1\n w = w * 1000.0\n\n half_dim = embedding_dim // 2\n emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)\n emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)\n emb = w.to(dtype)[:, None] * emb[None, :]\n emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)\n if embedding_dim % 2 == 1: # zero pad\n emb = torch.nn.functional.pad(emb, (0, 1))\n assert emb.shape == (w.shape[0], embedding_dim)\n return emb\n\n def get_timesteps(self, num_inference_steps, strength, device):\n # get the original timestep using init_timestep\n init_timestep = min(int(num_inference_steps * strength), num_inference_steps)\n\n t_start = max(num_inference_steps - init_timestep, 0)\n timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]\n\n return timesteps, num_inference_steps - t_start\n\n @torch.no_grad()\n def __call__(\n self,\n prompt: Union[str, List[str]] = None,\n image: PipelineImageInput = None,\n strength: float = 0.8,\n height: Optional[int] = 768,\n width: Optional[int] = 768,\n guidance_scale: float = 7.5,\n 
num_images_per_prompt: Optional[int] = 1,\n latents: Optional[torch.FloatTensor] = None,\n num_inference_steps: int = 4,\n original_inference_steps: int = 50,\n prompt_embeds: Optional[torch.FloatTensor] = None,\n output_type: Optional[str] = \"pil\",\n return_dict: bool = True,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n device: Optional[Union[str, torch.device]] = None,\n ):\n # 0. Default height and width to unet\n height = height or self.unet.config.sample_size * self.vae_scale_factor\n width = width or self.unet.config.sample_size * self.vae_scale_factor\n\n # 2. Define call parameters\n if prompt is not None and isinstance(prompt, str):\n batch_size = 1\n elif prompt is not None and isinstance(prompt, list):\n batch_size = len(prompt)\n else:\n batch_size = prompt_embeds.shape[0]\n\n device = device\n # do_classifier_free_guidance = guidance_scale > 0.0 # In LCM Implementation: cfg_noise = noise_cond + cfg_scale * (noise_cond - noise_uncond) , (cfg_scale > 0.0 using CFG)\n\n # 3. Encode input prompt\n prompt_embeds = self._encode_prompt(\n prompt,\n device,\n num_images_per_prompt,\n prompt_embeds=prompt_embeds,\n )\n\n # 3.5 encode image\n image = self.image_processor.preprocess(image=image)\n\n # 4. Prepare timesteps\n self.scheduler.set_timesteps(strength, num_inference_steps, original_inference_steps)\n # timesteps = self.scheduler.timesteps\n # timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, 1.0, device)\n timesteps = self.scheduler.timesteps\n latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)\n\n # 5. Prepare latent variable\n num_channels_latents = self.unet.config.in_channels\n latents = self.prepare_latents(\n image,\n latent_timestep,\n batch_size * num_images_per_prompt,\n num_channels_latents,\n height,\n width,\n prompt_embeds.dtype,\n device,\n latents,\n )\n bs = batch_size * num_images_per_prompt\n\n # 6. Get Guidance Scale Embedding\n w = torch.tensor(guidance_scale).repeat(bs)\n w_embedding = self.get_w_embedding(w, embedding_dim=256).to(device=device, dtype=latents.dtype)\n\n # 7. LCM MultiStep Sampling Loop:\n with self.progress_bar(total=num_inference_steps) as progress_bar:\n for i, t in enumerate(timesteps):\n ts = torch.full((bs,), t, device=device, dtype=torch.long)\n latents = latents.to(prompt_embeds.dtype)\n\n # model prediction (v-prediction, eps, x)\n model_pred = self.unet(\n latents,\n ts,\n timestep_cond=w_embedding,\n encoder_hidden_states=prompt_embeds,\n cross_attention_kwargs=cross_attention_kwargs,\n return_dict=False,\n )[0]\n\n # compute the previous noisy sample x_t -> x_t-1\n latents, denoised = self.scheduler.step(model_pred, i, t, latents, return_dict=False)\n\n # # call the callback, if provided\n # if i == len(timesteps) - 1:\n progress_bar.update()\n\n denoised = denoised.to(prompt_embeds.dtype)\n if not output_type == \"latent\":\n image = self.vae.decode(denoised / self.vae.config.scaling_factor, return_dict=False)[0]\n image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)\n else:\n image = denoised\n has_nsfw_concept = None\n\n if has_nsfw_concept is None:\n do_denormalize = [True] * image.shape[0]\n else:\n do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]\n\n image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)\n\n if not return_dict:\n return (image, has_nsfw_concept)\n\n return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)" } ]
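Both pipeline snippets in the context list above inject the guidance scale through `get_w_embedding` rather than classifier-free guidance: the scale is multiplied by 1000 and turned into a sinusoidal embedding that conditions the UNet via `timestep_cond=w_embedding` with `embedding_dim=256`. A self-contained sketch of just that embedding, assuming only PyTorch:

```python
# Minimal standalone sketch of the guidance-scale embedding used by the two
# LCM pipeline snippets above (see get_w_embedding); assumes only PyTorch.
import torch


def guidance_embedding(w: torch.Tensor, embedding_dim: int = 256, dtype=torch.float32) -> torch.Tensor:
    """Sinusoidal embedding of the (scaled) guidance scale, one row per sample."""
    assert w.ndim == 1
    w = w * 1000.0  # same scaling as in the snippets
    half_dim = embedding_dim // 2
    freq = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
    freq = torch.exp(torch.arange(half_dim, dtype=dtype) * -freq)
    emb = w.to(dtype)[:, None] * freq[None, :]
    emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
    if embedding_dim % 2 == 1:  # zero pad for odd dimensions
        emb = torch.nn.functional.pad(emb, (0, 1))
    return emb  # shape (len(w), embedding_dim)


# Example: a batch of 4 images with guidance_scale = 8.0, as in the defaults further down.
w = torch.tensor(8.0).repeat(4)
print(guidance_embedding(w, embedding_dim=256).shape)  # torch.Size([4, 256])
```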
from concurrent.futures import ThreadPoolExecutor from pathlib import Path from typing import Optional from lcm.lcm_scheduler import LCMScheduler from lcm.lcm_pipeline import LatentConsistencyModelPipeline from lcm.lcm_i2i_pipeline import LatentConsistencyModelImg2ImgPipeline from diffusers.image_processor import PipelineImageInput from modules import script_callbacks from PIL import Image, PngImagePlugin import uuid import modules.scripts as scripts import modules.shared import os import random import time import numpy as np import gradio as gr import torch import cv2
11113
DESCRIPTION = '''# Latent Consistency Model Running [LCM_Dreamshaper_v7](https://huggingface.co/SimianLuo/LCM_Dreamshaper_v7) | [Project Page](https://latent-consistency-models.github.io) | [Extension Page](https://github.com/0xbitches/sd-webui-lcm) ''' MAX_SEED = np.iinfo(np.int32).max MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "768")) class Script(scripts.Script): def __init__(self) -> None: super().__init__() def title(self): return "LCM" def show(self, is_img2img): return scripts.AlwaysVisible def ui(self, is_img2img): return () def randomize_seed_fn(seed: int, randomize_seed: bool) -> int: if randomize_seed: seed = random.randint(0, MAX_SEED) return seed def save_image(img, metadata: dict): save_dir = os.path.join(scripts.basedir(), "outputs/txt2img-images/LCM/") Path(save_dir).mkdir(exist_ok=True, parents=True) seed = metadata["seed"] unique_id = uuid.uuid4() filename = save_dir + f"{unique_id}-{seed}" + ".png" meta_tuples = [(k, str(v)) for k, v in metadata.items()] png_info = PngImagePlugin.PngInfo() for k, v in meta_tuples: png_info.add_text(k, v) img.save(filename, pnginfo=png_info) return filename def save_images(image_array, metadata: dict): paths = [] with ThreadPoolExecutor() as executor: paths = list(executor.map(save_image, image_array, [metadata]*len(image_array))) return paths def generate( prompt: str, seed: int = 0, width: int = 512, height: int = 512, guidance_scale: float = 8.0, num_inference_steps: int = 4, num_images: int = 4, randomize_seed: bool = False, use_fp16: bool = True, use_torch_compile: bool = False, use_cpu: bool = False, progress=gr.Progress(track_tqdm=True) ) -> Image.Image: seed = randomize_seed_fn(seed, randomize_seed) torch.manual_seed(seed) selected_device = modules.shared.device if use_cpu: selected_device = "cpu" if use_fp16: use_fp16 = False print("LCM warning: running on CPU, overrode FP16 with FP32")
DESCRIPTION = '''# Latent Consistency Model Running [LCM_Dreamshaper_v7](https://huggingface.co/SimianLuo/LCM_Dreamshaper_v7) | [Project Page](https://latent-consistency-models.github.io) | [Extension Page](https://github.com/0xbitches/sd-webui-lcm) ''' MAX_SEED = np.iinfo(np.int32).max MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "768")) class Script(scripts.Script): def __init__(self) -> None: super().__init__() def title(self): return "LCM" def show(self, is_img2img): return scripts.AlwaysVisible def ui(self, is_img2img): return () def randomize_seed_fn(seed: int, randomize_seed: bool) -> int: if randomize_seed: seed = random.randint(0, MAX_SEED) return seed def save_image(img, metadata: dict): save_dir = os.path.join(scripts.basedir(), "outputs/txt2img-images/LCM/") Path(save_dir).mkdir(exist_ok=True, parents=True) seed = metadata["seed"] unique_id = uuid.uuid4() filename = save_dir + f"{unique_id}-{seed}" + ".png" meta_tuples = [(k, str(v)) for k, v in metadata.items()] png_info = PngImagePlugin.PngInfo() for k, v in meta_tuples: png_info.add_text(k, v) img.save(filename, pnginfo=png_info) return filename def save_images(image_array, metadata: dict): paths = [] with ThreadPoolExecutor() as executor: paths = list(executor.map(save_image, image_array, [metadata]*len(image_array))) return paths def generate( prompt: str, seed: int = 0, width: int = 512, height: int = 512, guidance_scale: float = 8.0, num_inference_steps: int = 4, num_images: int = 4, randomize_seed: bool = False, use_fp16: bool = True, use_torch_compile: bool = False, use_cpu: bool = False, progress=gr.Progress(track_tqdm=True) ) -> Image.Image: seed = randomize_seed_fn(seed, randomize_seed) torch.manual_seed(seed) selected_device = modules.shared.device if use_cpu: selected_device = "cpu" if use_fp16: use_fp16 = False print("LCM warning: running on CPU, overrode FP16 with FP32")
scheduler = LCMScheduler.from_pretrained(
0
2023-10-22 11:53:48+00:00
16k
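If these records are consumed as a next-line completion benchmark (each row pairs a truncated source file and retrieved cross-file snippets with the true next line, here `scheduler = LCMScheduler.from_pretrained(`), a minimal exact-match check could look like the sketch below. The helper name and scoring rule are illustrative assumptions, not something stated in the dump.

```python
# Hypothetical scoring helper: compare the first non-empty generated line
# against the reference next line, ignoring surrounding whitespace.
def next_line_exact_match(model_completion: str, reference_next_line: str) -> bool:
    generated_lines = [ln for ln in model_completion.splitlines() if ln.strip()]
    first_line = generated_lines[0].strip() if generated_lines else ""
    return first_line == reference_next_line.strip()


print(next_line_exact_match(
    "scheduler = LCMScheduler.from_pretrained(\n    ...",
    "scheduler = LCMScheduler.from_pretrained(",
))  # True
```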
kylesargent/ZeroNVS
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Float[Tensor, \"2 3\"]\n self.register_buffer(\n \"bbox\",\n torch.as_tensor(\n [\n [-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],\n [self.cfg.radius, self.cfg.radius, self.cfg.radius],\n ],\n dtype=torch.float32,\n ),\n )" }, { "identifier": "BaseGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseGeometry(BaseModule):\n @dataclass\n class Config(BaseModule.Config):\n pass\n\n cfg: Config\n\n @staticmethod\n def create_from(\n other: \"BaseGeometry\", cfg: Optional[Union[dict, DictConfig]] = None, **kwargs\n ) -> \"BaseGeometry\":\n raise TypeError(\n f\"Cannot create {BaseGeometry.__name__} from {other.__class__.__name__}\"\n )\n\n def export(self, *args, **kwargs) -> Dict[str, Any]:\n return {}" }, { "identifier": "contract_to_unisphere", "path": "threestudio/models/geometry/base.py", "snippet": "def contract_to_unisphere(\n x: Float[Tensor, \"... 3\"], bbox: Float[Tensor, \"2 3\"], unbounded: bool = False\n) -> Float[Tensor, \"... 3\"]:\n if unbounded:\n # import pdb\n # pdb.set_trace()\n\n x = scale_tensor(x, bbox, (0, 1))\n x = x * 2 - 1 # aabb is at [-1, 1]\n mag = x.norm(dim=-1, keepdim=True)\n mask = mag.squeeze(-1) > 1\n x = x.clone()\n x[mask] = (2 - 1 / mag[mask]) * (x[mask] / mag[mask])\n x = x / 4 + 0.5 # [-inf, inf] is at [0, 1]\n else:\n x = scale_tensor(x, bbox, (0, 1))\n return x" }, { "identifier": "ImplicitSDF", "path": "threestudio/models/geometry/implicit_sdf.py", "snippet": "class ImplicitSDF(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: Union[\n float, str\n ] = 0.01 # in [float, \"progressive\"]\n shape_init: Optional[str] = None\n shape_init_params: Optional[Any] = None\n shape_init_mesh_up: str = \"+z\"\n shape_init_mesh_front: str = \"+x\"\n force_shape_init: bool = False\n sdf_bias: Union[float, str] = 0.0\n sdf_bias_params: Optional[Any] = None\n\n # no need to removal outlier for SDF\n isosurface_remove_outliers: bool = False\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.sdf_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n if self.cfg.isosurface_deformable_grid:\n assert (\n self.cfg.isosurface_method == 
\"mt\"\n ), \"isosurface_deformable_grid only works with mt\"\n self.deformation_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n self.finite_difference_normal_eps: Optional[float] = None\n\n def initialize_shape(self) -> None:\n if self.cfg.shape_init is None and not self.cfg.force_shape_init:\n return\n\n # do not initialize shape if weights are provided\n if self.cfg.weights is not None and not self.cfg.force_shape_init:\n return\n\n if self.cfg.sdf_bias != 0.0:\n threestudio.warn(\n \"shape_init and sdf_bias are both specified, which may lead to unexpected results.\"\n )\n\n get_gt_sdf: Callable[[Float[Tensor, \"N 3\"]], Float[Tensor, \"N 1\"]]\n assert isinstance(self.cfg.shape_init, str)\n if self.cfg.shape_init == \"ellipsoid\":\n assert (\n isinstance(self.cfg.shape_init_params, Sized)\n and len(self.cfg.shape_init_params) == 3\n )\n size = torch.as_tensor(self.cfg.shape_init_params).to(self.device)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return ((points_rand / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n\n get_gt_sdf = func\n elif self.cfg.shape_init == \"sphere\":\n assert isinstance(self.cfg.shape_init_params, float)\n radius = self.cfg.shape_init_params\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius\n\n get_gt_sdf = func\n elif self.cfg.shape_init.startswith(\"mesh:\"):\n assert isinstance(self.cfg.shape_init_params, float)\n mesh_path = self.cfg.shape_init[5:]\n if not os.path.exists(mesh_path):\n raise ValueError(f\"Mesh file {mesh_path} does not exist.\")\n\n import trimesh\n\n scene = trimesh.load(mesh_path)\n if isinstance(scene, trimesh.Trimesh):\n mesh = scene\n elif isinstance(scene, trimesh.scene.Scene):\n mesh = trimesh.Trimesh()\n for obj in scene.geometry.values():\n mesh = trimesh.util.concatenate([mesh, obj])\n else:\n raise ValueError(f\"Unknown mesh type at {mesh_path}.\")\n\n # move to center\n centroid = mesh.vertices.mean(0)\n mesh.vertices = mesh.vertices - centroid\n\n # align to up-z and front-x\n dirs = [\"+x\", \"+y\", \"+z\", \"-x\", \"-y\", \"-z\"]\n dir2vec = {\n \"+x\": np.array([1, 0, 0]),\n \"+y\": np.array([0, 1, 0]),\n \"+z\": np.array([0, 0, 1]),\n \"-x\": np.array([-1, 0, 0]),\n \"-y\": np.array([0, -1, 0]),\n \"-z\": np.array([0, 0, -1]),\n }\n if (\n self.cfg.shape_init_mesh_up not in dirs\n or self.cfg.shape_init_mesh_front not in dirs\n ):\n raise ValueError(\n f\"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}.\"\n )\n if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]:\n raise ValueError(\n \"shape_init_mesh_up and shape_init_mesh_front must be orthogonal.\"\n )\n z_, x_ = (\n dir2vec[self.cfg.shape_init_mesh_up],\n dir2vec[self.cfg.shape_init_mesh_front],\n )\n y_ = np.cross(z_, x_)\n std2mesh = np.stack([x_, y_, z_], axis=0).T\n mesh2std = np.linalg.inv(std2mesh)\n\n # scaling\n scale = np.abs(mesh.vertices).max()\n mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params\n mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T\n\n from pysdf import SDF\n\n sdf = SDF(mesh.vertices, mesh.faces)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n # add a negative signed here\n # as in pysdf the inside of the shape has positive signed distance\n return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to(\n points_rand\n )[..., None]\n\n 
get_gt_sdf = func\n\n else:\n raise ValueError(\n f\"Unknown shape initialization type: {self.cfg.shape_init}\"\n )\n\n # Initialize SDF to a given shape when no weights are provided or force_shape_init is True\n optim = torch.optim.Adam(self.parameters(), lr=1e-3)\n from tqdm import tqdm\n\n for _ in tqdm(\n range(1000),\n desc=f\"Initializing SDF to a(n) {self.cfg.shape_init}:\",\n disable=get_rank() != 0,\n ):\n points_rand = (\n torch.rand((10000, 3), dtype=torch.float32).to(self.device) * 2.0 - 1.0\n )\n sdf_gt = get_gt_sdf(points_rand)\n sdf_pred = self.forward_sdf(points_rand)\n loss = F.mse_loss(sdf_pred, sdf_gt)\n optim.zero_grad()\n loss.backward()\n optim.step()\n\n # explicit broadcast to ensure param consistency across ranks\n for param in self.parameters():\n broadcast(param, src=0)\n\n def get_shifted_sdf(\n self, points: Float[Tensor, \"*N Di\"], sdf: Float[Tensor, \"*N 1\"]\n ) -> Float[Tensor, \"*N 1\"]:\n sdf_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.sdf_bias == \"ellipsoid\":\n assert (\n isinstance(self.cfg.sdf_bias_params, Sized)\n and len(self.cfg.sdf_bias_params) == 3\n )\n size = torch.as_tensor(self.cfg.sdf_bias_params).to(points)\n sdf_bias = ((points / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n elif self.cfg.sdf_bias == \"sphere\":\n assert isinstance(self.cfg.sdf_bias_params, float)\n radius = self.cfg.sdf_bias_params\n sdf_bias = (points**2).sum(dim=-1, keepdim=True).sqrt() - radius\n elif isinstance(self.cfg.sdf_bias, float):\n sdf_bias = self.cfg.sdf_bias\n else:\n raise ValueError(f\"Unknown sdf bias {self.cfg.sdf_bias}\")\n return sdf + sdf_bias\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).view(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n output = {\"sdf\": sdf}\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n assert self.finite_difference_normal_eps is not None\n eps: float = self.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 6 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (\n 0.5\n * (sdf_offset[..., 0::2, 0] - sdf_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 
3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 3 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (sdf_offset[..., 0::1, 0] - sdf) / eps\n normal = F.normalize(sdf_grad, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n sdf_grad = normal\n elif self.cfg.normal_type == \"analytic\":\n sdf_grad = -torch.autograd.grad(\n sdf,\n points_unscaled,\n grad_outputs=torch.ones_like(sdf),\n create_graph=True,\n )[0]\n normal = F.normalize(sdf_grad, dim=-1)\n if not grad_enabled:\n sdf_grad = sdf_grad.detach()\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update(\n {\"normal\": normal, \"shading_normal\": normal, \"sdf_grad\": sdf_grad}\n )\n return output\n\n def forward_sdf(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n sdf = self.sdf_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n return sdf\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n deformation: Optional[Float[Tensor, \"*N 3\"]] = None\n if self.cfg.isosurface_deformable_grid:\n deformation = self.deformation_network(enc).reshape(*points.shape[:-1], 3)\n return sdf, deformation\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return field - threshold\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n if isinstance(self.cfg.finite_difference_normal_eps, float):\n self.finite_difference_normal_eps = (\n self.cfg.finite_difference_normal_eps\n )\n elif self.cfg.finite_difference_normal_eps == \"progressive\":\n # progressive finite difference eps from Neuralangelo\n # https://arxiv.org/abs/2306.03092\n hg_conf: Any = self.cfg.pos_encoding_config\n assert (\n hg_conf.otype == \"ProgressiveBandHashGrid\"\n ), \"finite_difference_normal_eps=progressive only works with ProgressiveBandHashGrid\"\n current_level = min(\n hg_conf.start_level\n + max(global_step - hg_conf.start_step, 0) // hg_conf.update_steps,\n hg_conf.n_levels,\n )\n grid_res = hg_conf.base_resolution * hg_conf.per_level_scale ** (\n current_level - 1\n )\n grid_size = 2 * self.cfg.radius / grid_res\n if grid_size != 
self.finite_difference_normal_eps:\n threestudio.info(\n f\"Update finite_difference_normal_eps to {grid_size}\"\n )\n self.finite_difference_normal_eps = grid_size\n else:\n raise ValueError(\n f\"Unknown finite_difference_normal_eps={self.cfg.finite_difference_normal_eps}\"\n )" }, { "identifier": "ImplicitVolume", "path": "threestudio/models/geometry/implicit_volume.py", "snippet": "class ImplicitVolume(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n density_activation: Optional[str] = \"softplus\"\n density_bias: Union[float, str] = \"blob_magic3d\"\n density_blob_scale: float = 10.0\n density_blob_std: float = 0.5\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: float = 0.01\n\n # automatically determine the threshold\n isosurface_threshold: Union[float, str] = 25.0\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.density_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n def get_activated_density(\n self, points: Float[Tensor, \"*N Di\"], density: Float[Tensor, \"*N 1\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Float[Tensor, \"*N 1\"]]:\n density_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.density_bias == \"blob_dreamfusion\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * torch.exp(\n -0.5 * (points**2).sum(dim=-1) / self.cfg.density_blob_std**2\n )[..., None]\n )\n elif self.cfg.density_bias == \"blob_magic3d\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * (\n 1\n - torch.sqrt((points**2).sum(dim=-1)) / self.cfg.density_blob_std\n )[..., None]\n )\n elif isinstance(self.cfg.density_bias, float):\n density_bias = self.cfg.density_bias\n else:\n raise ValueError(f\"Unknown density bias {self.cfg.density_bias}\")\n raw_density: Float[Tensor, \"*N 1\"] = density + density_bias\n density = get_activation(self.cfg.density_activation)(raw_density)\n return raw_density, density\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n assert self.unbounded\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n 
density = self.density_network(enc).view(*points.shape[:-1], 1)\n raw_density, density = self.get_activated_density(points_unscaled, density)\n\n output = {\n \"density\": density,\n }\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n # TODO: use raw density\n eps = self.cfg.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 6 1\"] = self.forward_density(\n points_offset\n )\n normal = (\n -0.5\n * (density_offset[..., 0::2, 0] - density_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 3 1\"] = self.forward_density(\n points_offset\n )\n normal = -(density_offset[..., 0::1, 0] - density) / eps\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"analytic\":\n normal = -torch.autograd.grad(\n density,\n points_unscaled,\n grad_outputs=torch.ones_like(density),\n create_graph=True,\n )[0]\n normal = F.normalize(normal, dim=-1)\n if not grad_enabled:\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update({\"normal\": normal, \"shading_normal\": normal})\n\n torch.set_grad_enabled(grad_enabled)\n return output\n\n def forward_density(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n density = self.density_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n\n _, density = self.get_activated_density(points_unscaled, density)\n return density\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n if self.cfg.isosurface_deformable_grid:\n threestudio.warn(\n f\"{self.__class__.__name__} does not support isosurface_deformable_grid. 
Ignoring.\"\n )\n density = self.forward_density(points)\n return density, None\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return -(field - threshold)\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n @staticmethod\n @torch.no_grad()\n def create_from(\n other: BaseGeometry,\n cfg: Optional[Union[dict, DictConfig]] = None,\n copy_net: bool = True,\n **kwargs,\n ) -> \"ImplicitVolume\":\n if isinstance(other, ImplicitVolume):\n instance = ImplicitVolume(cfg, **kwargs)\n instance.encoding.load_state_dict(other.encoding.state_dict())\n instance.density_network.load_state_dict(other.density_network.state_dict())\n if copy_net:\n if (\n instance.cfg.n_feature_dims > 0\n and other.cfg.n_feature_dims == instance.cfg.n_feature_dims\n ):\n instance.feature_network.load_state_dict(\n other.feature_network.state_dict()\n )\n if (\n instance.cfg.normal_type == \"pred\"\n and other.cfg.normal_type == \"pred\"\n ):\n instance.normal_network.load_state_dict(\n other.normal_network.state_dict()\n )\n return instance\n else:\n raise TypeError(\n f\"Cannot create {ImplicitVolume.__name__} from {other.__class__.__name__}\"\n )" }, { "identifier": "MarchingTetrahedraHelper", "path": "threestudio/models/isosurface.py", "snippet": "class MarchingTetrahedraHelper(IsosurfaceHelper):\n def __init__(self, resolution: int, tets_path: str):\n super().__init__()\n self.resolution = resolution\n self.tets_path = tets_path\n\n self.triangle_table: Float[Tensor, \"...\"]\n self.register_buffer(\n \"triangle_table\",\n torch.as_tensor(\n [\n [-1, -1, -1, -1, -1, -1],\n [1, 0, 2, -1, -1, -1],\n [4, 0, 3, -1, -1, -1],\n [1, 4, 2, 1, 3, 4],\n [3, 1, 5, -1, -1, -1],\n [2, 3, 0, 2, 5, 3],\n [1, 4, 0, 1, 5, 4],\n [4, 2, 5, -1, -1, -1],\n [4, 5, 2, -1, -1, -1],\n [4, 1, 0, 4, 5, 1],\n [3, 2, 0, 3, 5, 2],\n [1, 3, 5, -1, -1, -1],\n [4, 1, 2, 4, 3, 1],\n [3, 0, 4, -1, -1, -1],\n [2, 0, 1, -1, -1, -1],\n [-1, -1, -1, -1, -1, -1],\n ],\n dtype=torch.long,\n ),\n persistent=False,\n )\n self.num_triangles_table: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"num_triangles_table\",\n torch.as_tensor(\n [0, 1, 1, 2, 1, 2, 2, 1, 1, 2, 2, 1, 2, 1, 1, 0], dtype=torch.long\n ),\n persistent=False,\n )\n self.base_tet_edges: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"base_tet_edges\",\n torch.as_tensor([0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3], dtype=torch.long),\n persistent=False,\n )\n\n tets = np.load(self.tets_path)\n self._grid_vertices: Float[Tensor, \"...\"]\n self.register_buffer(\n \"_grid_vertices\",\n torch.from_numpy(tets[\"vertices\"]).float(),\n persistent=False,\n )\n self.indices: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"indices\", torch.from_numpy(tets[\"indices\"]).long(), persistent=False\n )\n\n self._all_edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n\n def normalize_grid_deformation(\n self, grid_vertex_offsets: Float[Tensor, \"Nv 3\"]\n ) -> Float[Tensor, \"Nv 3\"]:\n return (\n (self.points_range[1] - self.points_range[0])\n / (self.resolution) # half tet size is approximately 1 / 
self.resolution\n * torch.tanh(grid_vertex_offsets)\n ) # FIXME: hard-coded activation\n\n @property\n def grid_vertices(self) -> Float[Tensor, \"Nv 3\"]:\n return self._grid_vertices\n\n @property\n def all_edges(self) -> Integer[Tensor, \"Ne 2\"]:\n if self._all_edges is None:\n # compute edges on GPU, or it would be VERY SLOW (basically due to the unique operation)\n edges = torch.tensor(\n [0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3],\n dtype=torch.long,\n device=self.indices.device,\n )\n _all_edges = self.indices[:, edges].reshape(-1, 2)\n _all_edges_sorted = torch.sort(_all_edges, dim=1)[0]\n _all_edges = torch.unique(_all_edges_sorted, dim=0)\n self._all_edges = _all_edges\n return self._all_edges\n\n def sort_edges(self, edges_ex2):\n with torch.no_grad():\n order = (edges_ex2[:, 0] > edges_ex2[:, 1]).long()\n order = order.unsqueeze(dim=1)\n\n a = torch.gather(input=edges_ex2, index=order, dim=1)\n b = torch.gather(input=edges_ex2, index=1 - order, dim=1)\n\n return torch.stack([a, b], -1)\n\n def _forward(self, pos_nx3, sdf_n, tet_fx4):\n with torch.no_grad():\n occ_n = sdf_n > 0\n occ_fx4 = occ_n[tet_fx4.reshape(-1)].reshape(-1, 4)\n occ_sum = torch.sum(occ_fx4, -1)\n valid_tets = (occ_sum > 0) & (occ_sum < 4)\n occ_sum = occ_sum[valid_tets]\n\n # find all vertices\n all_edges = tet_fx4[valid_tets][:, self.base_tet_edges].reshape(-1, 2)\n all_edges = self.sort_edges(all_edges)\n unique_edges, idx_map = torch.unique(all_edges, dim=0, return_inverse=True)\n\n unique_edges = unique_edges.long()\n mask_edges = occ_n[unique_edges.reshape(-1)].reshape(-1, 2).sum(-1) == 1\n mapping = (\n torch.ones(\n (unique_edges.shape[0]), dtype=torch.long, device=pos_nx3.device\n )\n * -1\n )\n mapping[mask_edges] = torch.arange(\n mask_edges.sum(), dtype=torch.long, device=pos_nx3.device\n )\n idx_map = mapping[idx_map] # map edges to verts\n\n interp_v = unique_edges[mask_edges]\n edges_to_interp = pos_nx3[interp_v.reshape(-1)].reshape(-1, 2, 3)\n edges_to_interp_sdf = sdf_n[interp_v.reshape(-1)].reshape(-1, 2, 1)\n edges_to_interp_sdf[:, -1] *= -1\n\n denominator = edges_to_interp_sdf.sum(1, keepdim=True)\n\n edges_to_interp_sdf = torch.flip(edges_to_interp_sdf, [1]) / denominator\n verts = (edges_to_interp * edges_to_interp_sdf).sum(1)\n\n idx_map = idx_map.reshape(-1, 6)\n\n v_id = torch.pow(2, torch.arange(4, dtype=torch.long, device=pos_nx3.device))\n tetindex = (occ_fx4[valid_tets] * v_id.unsqueeze(0)).sum(-1)\n num_triangles = self.num_triangles_table[tetindex]\n\n # Generate triangle indices\n faces = torch.cat(\n (\n torch.gather(\n input=idx_map[num_triangles == 1],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 1]][:, :3],\n ).reshape(-1, 3),\n torch.gather(\n input=idx_map[num_triangles == 2],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 2]][:, :6],\n ).reshape(-1, 3),\n ),\n dim=0,\n )\n\n return verts, faces\n\n def forward(\n self,\n level: Float[Tensor, \"N3 1\"],\n deformation: Optional[Float[Tensor, \"N3 3\"]] = None,\n ) -> Mesh:\n if deformation is not None:\n grid_vertices = self.grid_vertices + self.normalize_grid_deformation(\n deformation\n )\n else:\n grid_vertices = self.grid_vertices\n\n v_pos, t_pos_idx = self._forward(grid_vertices, level, self.indices)\n\n mesh = Mesh(\n v_pos=v_pos,\n t_pos_idx=t_pos_idx,\n # extras\n grid_vertices=grid_vertices,\n tet_edges=self.all_edges,\n grid_level=level,\n grid_deformation=deformation,\n )\n\n return mesh" }, { "identifier": "Mesh", "path": "threestudio/models/mesh.py", "snippet": "class Mesh:\n def 
__init__(\n self, v_pos: Float[Tensor, \"Nv 3\"], t_pos_idx: Integer[Tensor, \"Nf 3\"], **kwargs\n ) -> None:\n self.v_pos: Float[Tensor, \"Nv 3\"] = v_pos\n self.t_pos_idx: Integer[Tensor, \"Nf 3\"] = t_pos_idx\n self._v_nrm: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tng: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tex: Optional[Float[Tensor, \"Nt 3\"]] = None\n self._t_tex_idx: Optional[Float[Tensor, \"Nf 3\"]] = None\n self._v_rgb: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n self.extras: Dict[str, Any] = {}\n for k, v in kwargs.items():\n self.add_extra(k, v)\n\n def add_extra(self, k, v) -> None:\n self.extras[k] = v\n\n def remove_outlier(self, outlier_n_faces_threshold: Union[int, float]) -> Mesh:\n if self.requires_grad:\n threestudio.debug(\"Mesh is differentiable, not removing outliers\")\n return self\n\n # use trimesh to first split the mesh into connected components\n # then remove the components with less than n_face_threshold faces\n import trimesh\n\n # construct a trimesh object\n mesh = trimesh.Trimesh(\n vertices=self.v_pos.detach().cpu().numpy(),\n faces=self.t_pos_idx.detach().cpu().numpy(),\n )\n\n # split the mesh into connected components\n components = mesh.split(only_watertight=False)\n # log the number of faces in each component\n threestudio.debug(\n \"Mesh has {} components, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n\n n_faces_threshold: int\n if isinstance(outlier_n_faces_threshold, float):\n # set the threshold to the number of faces in the largest component multiplied by outlier_n_faces_threshold\n n_faces_threshold = int(\n max([c.faces.shape[0] for c in components]) * outlier_n_faces_threshold\n )\n else:\n # set the threshold directly to outlier_n_faces_threshold\n n_faces_threshold = outlier_n_faces_threshold\n\n # log the threshold\n threestudio.debug(\n \"Removing components with less than {} faces\".format(n_faces_threshold)\n )\n\n # remove the components with less than n_face_threshold faces\n components = [c for c in components if c.faces.shape[0] >= n_faces_threshold]\n\n # log the number of faces in each component after removing outliers\n threestudio.debug(\n \"Mesh has {} components after removing outliers, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n # merge the components\n mesh = trimesh.util.concatenate(components)\n\n # convert back to our mesh format\n v_pos = torch.from_numpy(mesh.vertices).to(self.v_pos)\n t_pos_idx = torch.from_numpy(mesh.faces).to(self.t_pos_idx)\n\n clean_mesh = Mesh(v_pos, t_pos_idx)\n # keep the extras unchanged\n\n if len(self.extras) > 0:\n clean_mesh.extras = self.extras\n threestudio.debug(\n f\"The following extra attributes are inherited from the original mesh unchanged: {list(self.extras.keys())}\"\n )\n return clean_mesh\n\n @property\n def requires_grad(self):\n return self.v_pos.requires_grad\n\n @property\n def v_nrm(self):\n if self._v_nrm is None:\n self._v_nrm = self._compute_vertex_normal()\n return self._v_nrm\n\n @property\n def v_tng(self):\n if self._v_tng is None:\n self._v_tng = self._compute_vertex_tangent()\n return self._v_tng\n\n @property\n def v_tex(self):\n if self._v_tex is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._v_tex\n\n @property\n def t_tex_idx(self):\n if self._t_tex_idx is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._t_tex_idx\n\n @property\n def 
v_rgb(self):\n return self._v_rgb\n\n @property\n def edges(self):\n if self._edges is None:\n self._edges = self._compute_edges()\n return self._edges\n\n def _compute_vertex_normal(self):\n i0 = self.t_pos_idx[:, 0]\n i1 = self.t_pos_idx[:, 1]\n i2 = self.t_pos_idx[:, 2]\n\n v0 = self.v_pos[i0, :]\n v1 = self.v_pos[i1, :]\n v2 = self.v_pos[i2, :]\n\n face_normals = torch.cross(v1 - v0, v2 - v0)\n\n # Splat face normals to vertices\n v_nrm = torch.zeros_like(self.v_pos)\n v_nrm.scatter_add_(0, i0[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i1[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i2[:, None].repeat(1, 3), face_normals)\n\n # Normalize, replace zero (degenerated) normals with some default value\n v_nrm = torch.where(\n dot(v_nrm, v_nrm) > 1e-20, v_nrm, torch.as_tensor([0.0, 0.0, 1.0]).to(v_nrm)\n )\n v_nrm = F.normalize(v_nrm, dim=1)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(v_nrm))\n\n return v_nrm\n\n def _compute_vertex_tangent(self):\n vn_idx = [None] * 3\n pos = [None] * 3\n tex = [None] * 3\n for i in range(0, 3):\n pos[i] = self.v_pos[self.t_pos_idx[:, i]]\n tex[i] = self.v_tex[self.t_tex_idx[:, i]]\n # t_nrm_idx is always the same as t_pos_idx\n vn_idx[i] = self.t_pos_idx[:, i]\n\n tangents = torch.zeros_like(self.v_nrm)\n tansum = torch.zeros_like(self.v_nrm)\n\n # Compute tangent space for each triangle\n uve1 = tex[1] - tex[0]\n uve2 = tex[2] - tex[0]\n pe1 = pos[1] - pos[0]\n pe2 = pos[2] - pos[0]\n\n nom = pe1 * uve2[..., 1:2] - pe2 * uve1[..., 1:2]\n denom = uve1[..., 0:1] * uve2[..., 1:2] - uve1[..., 1:2] * uve2[..., 0:1]\n\n # Avoid division by zero for degenerated texture coordinates\n tang = nom / torch.where(\n denom > 0.0, torch.clamp(denom, min=1e-6), torch.clamp(denom, max=-1e-6)\n )\n\n # Update all 3 vertices\n for i in range(0, 3):\n idx = vn_idx[i][:, None].repeat(1, 3)\n tangents.scatter_add_(0, idx, tang) # tangents[n_i] = tangents[n_i] + tang\n tansum.scatter_add_(\n 0, idx, torch.ones_like(tang)\n ) # tansum[n_i] = tansum[n_i] + 1\n tangents = tangents / tansum\n\n # Normalize and make sure tangent is perpendicular to normal\n tangents = F.normalize(tangents, dim=1)\n tangents = F.normalize(tangents - dot(tangents, self.v_nrm) * self.v_nrm)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(tangents))\n\n return tangents\n\n def _unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n threestudio.info(\"Using xatlas to perform UV unwrapping, may take a while ...\")\n\n import xatlas\n\n atlas = xatlas.Atlas()\n atlas.add_mesh(\n self.v_pos.detach().cpu().numpy(),\n self.t_pos_idx.cpu().numpy(),\n )\n co = xatlas.ChartOptions()\n po = xatlas.PackOptions()\n for k, v in xatlas_chart_options.items():\n setattr(co, k, v)\n for k, v in xatlas_pack_options.items():\n setattr(po, k, v)\n atlas.generate(co, po)\n vmapping, indices, uvs = atlas.get_mesh(0)\n vmapping = (\n torch.from_numpy(\n vmapping.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n uvs = torch.from_numpy(uvs).to(self.v_pos.device).float()\n indices = (\n torch.from_numpy(\n indices.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n return uvs, indices\n\n def unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n self._v_tex, self._t_tex_idx = self._unwrap_uv(\n xatlas_chart_options, xatlas_pack_options\n )\n\n def set_vertex_color(self, v_rgb):\n 
assert v_rgb.shape[0] == self.v_pos.shape[0]\n self._v_rgb = v_rgb\n\n def _compute_edges(self):\n # Compute edges\n edges = torch.cat(\n [\n self.t_pos_idx[:, [0, 1]],\n self.t_pos_idx[:, [1, 2]],\n self.t_pos_idx[:, [2, 0]],\n ],\n dim=0,\n )\n edges = edges.sort()[0]\n edges = torch.unique(edges, dim=0)\n return edges\n\n def normal_consistency(self) -> Float[Tensor, \"\"]:\n edge_nrm: Float[Tensor, \"Ne 2 3\"] = self.v_nrm[self.edges]\n nc = (\n 1.0 - torch.cosine_similarity(edge_nrm[:, 0], edge_nrm[:, 1], dim=-1)\n ).mean()\n return nc\n\n def _laplacian_uniform(self):\n # from stable-dreamfusion\n # https://github.com/ashawkey/stable-dreamfusion/blob/8fb3613e9e4cd1ded1066b46e80ca801dfb9fd06/nerf/renderer.py#L224\n verts, faces = self.v_pos, self.t_pos_idx\n\n V = verts.shape[0]\n F = faces.shape[0]\n\n # Neighbor indices\n ii = faces[:, [1, 2, 0]].flatten()\n jj = faces[:, [2, 0, 1]].flatten()\n adj = torch.stack([torch.cat([ii, jj]), torch.cat([jj, ii])], dim=0).unique(\n dim=1\n )\n adj_values = torch.ones(adj.shape[1]).to(verts)\n\n # Diagonal indices\n diag_idx = adj[0]\n\n # Build the sparse matrix\n idx = torch.cat((adj, torch.stack((diag_idx, diag_idx), dim=0)), dim=1)\n values = torch.cat((-adj_values, adj_values))\n\n # The coalesce operation sums the duplicate indices, resulting in the\n # correct diagonal\n return torch.sparse_coo_tensor(idx, values, (V, V)).coalesce()\n\n def laplacian(self) -> Float[Tensor, \"\"]:\n with torch.no_grad():\n L = self._laplacian_uniform()\n loss = L.mm(self.v_pos)\n loss = loss.norm(dim=1)\n loss = loss.mean()\n return loss" }, { "identifier": "get_encoding", "path": "threestudio/models/networks.py", "snippet": "def get_encoding(n_input_dims: int, config) -> nn.Module:\n # input suppose to be range [0, 1]\n encoding: nn.Module\n if config.otype == \"ProgressiveBandFrequency\":\n encoding = ProgressiveBandFrequency(n_input_dims, config_to_primitive(config))\n elif config.otype == \"ProgressiveBandHashGrid\":\n encoding = ProgressiveBandHashGrid(n_input_dims, config_to_primitive(config))\n else:\n encoding = TCNNEncoding(n_input_dims, config_to_primitive(config))\n encoding = CompositeEncoding(\n encoding,\n include_xyz=config.get(\"include_xyz\", False),\n xyz_scale=2.0,\n xyz_offset=-1.0,\n ) # FIXME: hard coded\n return encoding" }, { "identifier": "get_mlp", "path": "threestudio/models/networks.py", "snippet": "def get_mlp(n_input_dims, n_output_dims, config) -> nn.Module:\n network: nn.Module\n if config.otype == \"VanillaMLP\":\n network = VanillaMLP(n_input_dims, n_output_dims, config_to_primitive(config))\n elif config.otype == \"SphereInitVanillaMLP\":\n network = SphereInitVanillaMLP(\n n_input_dims, n_output_dims, config_to_primitive(config)\n )\n else:\n assert (\n config.get(\"sphere_init\", False) is False\n ), \"sphere_init=True only supported by VanillaMLP\"\n network = TCNNNetwork(n_input_dims, n_output_dims, config_to_primitive(config))\n return network" }, { "identifier": "scale_tensor", "path": "threestudio/utils/ops.py", "snippet": "def scale_tensor(\n dat: Num[Tensor, \"... D\"], inp_scale: ValidScale, tgt_scale: ValidScale\n):\n if inp_scale is None:\n inp_scale = (0, 1)\n if tgt_scale is None:\n tgt_scale = (0, 1)\n if isinstance(tgt_scale, Tensor):\n assert dat.shape[-1] == tgt_scale.shape[-1]\n dat = (dat - inp_scale[0]) / (inp_scale[1] - inp_scale[0])\n dat = dat * (tgt_scale[1] - tgt_scale[0]) + tgt_scale[0]\n return dat" } ]
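The `ImplicitSDF` and `ImplicitVolume` snippets above both default to finite-difference normals: the field is evaluated at plus/minus eps offsets along each axis and a central difference approximates the gradient. A self-contained sketch of that scheme on an analytic sphere SDF (the sphere SDF and the eps value are illustrative choices, not taken from the repository):

```python
# Minimal sketch of finite-difference normal estimation in the style of the
# "finite_difference_laplacian" branch above: +/- eps offsets, central difference.
import torch
import torch.nn.functional as F


def sphere_sdf(points: torch.Tensor, radius: float = 0.5) -> torch.Tensor:
    # illustrative analytic SDF standing in for the learned network
    return points.norm(dim=-1, keepdim=True) - radius


def finite_difference_normal(sdf_fn, points: torch.Tensor, eps: float = 1e-2) -> torch.Tensor:
    offsets = torch.as_tensor(
        [[eps, 0.0, 0.0], [-eps, 0.0, 0.0],
         [0.0, eps, 0.0], [0.0, -eps, 0.0],
         [0.0, 0.0, eps], [0.0, 0.0, -eps]]
    ).to(points)
    # (..., 6, 3): each point evaluated at six axis-aligned offsets
    points_offset = points[..., None, :] + offsets
    sdf_offset = sdf_fn(points_offset)  # (..., 6, 1)
    grad = 0.5 * (sdf_offset[..., 0::2, 0] - sdf_offset[..., 1::2, 0]) / eps
    return F.normalize(grad, dim=-1)


pts = torch.tensor([[0.5, 0.0, 0.0], [0.0, 0.3, 0.4]])
print(finite_difference_normal(sphere_sdf, pts))  # approx. unit vectors pointing radially outward
```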
from dataclasses import dataclass, field from threestudio.models.geometry.base import ( BaseExplicitGeometry, BaseGeometry, contract_to_unisphere, ) from threestudio.models.geometry.implicit_sdf import ImplicitSDF from threestudio.models.geometry.implicit_volume import ImplicitVolume from threestudio.models.isosurface import MarchingTetrahedraHelper from threestudio.models.mesh import Mesh from threestudio.models.networks import get_encoding, get_mlp from threestudio.utils.ops import scale_tensor from threestudio.utils.typing import * import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import threestudio
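Among the imports above is `scale_tensor`, whose definition is the last snippet in the context list and which the code below uses to map extracted mesh vertices from the tetrahedra grid's normalized range back to the isosurface bounding box. A simplified, runnable sketch (the example values are illustrative, and the shape assertion from the original is dropped):

```python
# Simplified sketch of scale_tensor: linearly remap values from one range to another.
import torch


def scale_tensor(dat, inp_scale, tgt_scale):
    if inp_scale is None:
        inp_scale = (0, 1)
    if tgt_scale is None:
        tgt_scale = (0, 1)
    dat = (dat - inp_scale[0]) / (inp_scale[1] - inp_scale[0])
    return dat * (tgt_scale[1] - tgt_scale[0]) + tgt_scale[0]


# e.g. vertices in the tet grid's normalized (0, 1) range mapped into a [-1, 1]^3 bbox
verts = torch.tensor([[0.0, 0.5, 1.0]])
bbox = torch.tensor([[-1.0, -1.0, -1.0], [1.0, 1.0, 1.0]])
print(scale_tensor(verts, (0, 1), bbox))  # tensor([[-1., 0., 1.]])
```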
13932
isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: raise NotImplementedError def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale
all_code:

@threestudio.register("tetrahedra-sdf-grid")
class TetrahedraSDFGrid(BaseExplicitGeometry):
    @dataclass
    class Config(BaseExplicitGeometry.Config):
        isosurface_resolution: int = 128
        isosurface_deformable_grid: bool = True
        isosurface_remove_outliers: bool = False
        isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01

        n_input_dims: int = 3
        n_feature_dims: int = 3
        pos_encoding_config: dict = field(
            default_factory=lambda: {
                "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2,
                "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378,
            }
        )
        mlp_network_config: dict = field(
            default_factory=lambda: {
                "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none",
                "n_neurons": 64, "n_hidden_layers": 1,
            }
        )
        shape_init: Optional[str] = None
        shape_init_params: Optional[Any] = None
        force_shape_init: bool = False
        geometry_only: bool = False
        fix_geometry: bool = False

    cfg: Config

    def configure(self) -> None:
        super().configure()
        # this should be saved to state_dict, register as buffer
        self.isosurface_bbox: Float[Tensor, "2 3"]
        self.register_buffer("isosurface_bbox", self.bbox.clone())
        self.isosurface_helper = MarchingTetrahedraHelper(
            self.cfg.isosurface_resolution,
            f"load/tets/{self.cfg.isosurface_resolution}_tets.npz",
        )
        self.sdf: Float[Tensor, "Nv 1"]
        self.deformation: Optional[Float[Tensor, "Nv 3"]]
        if not self.cfg.fix_geometry:
            self.register_parameter(
                "sdf",
                nn.Parameter(torch.zeros((self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32)),
            )
            if self.cfg.isosurface_deformable_grid:
                self.register_parameter(
                    "deformation",
                    nn.Parameter(torch.zeros_like(self.isosurface_helper.grid_vertices)),
                )
            else:
                self.deformation = None
        else:
            self.register_buffer(
                "sdf",
                torch.zeros((self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32),
            )
            if self.cfg.isosurface_deformable_grid:
                self.register_buffer("deformation", torch.zeros_like(self.isosurface_helper.grid_vertices))
            else:
                self.deformation = None
        if not self.cfg.geometry_only:
            self.encoding = get_encoding(self.cfg.n_input_dims, self.cfg.pos_encoding_config)
            self.feature_network = get_mlp(
                self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config
            )
        self.mesh: Optional[Mesh] = None

    def initialize_shape(self) -> None:
        raise NotImplementedError

    def isosurface(self) -> Mesh:
        # return cached mesh if fix_geometry is True to save computation
        if self.cfg.fix_geometry and self.mesh is not None:
            return self.mesh
        mesh = self.isosurface_helper(self.sdf, self.deformation)
        mesh.v_pos = scale_tensor(mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox)
        if self.cfg.isosurface_remove_outliers:
            mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold)
        self.mesh = mesh
        return mesh

    def forward(
        self, points: Float[Tensor, "*N Di"], output_normal: bool = False
    ) -> Dict[str, Float[Tensor, "..."]]:
        if self.cfg.geometry_only:
            return {}
        assert output_normal == False, f"Normal output is not supported for {self.__class__.__name__}"
        points_unscaled = points  # points in the original scale
next_line:

points = contract_to_unisphere(points, self.bbox)  # points normalized to (0, 1)
gold_snippet_index: 2
created_at: 2023-10-24 19:02:44+00:00
level: 16k
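The next_line above maps raw query points into the unit cube before they reach the hash-grid encoding. As a rough, self-contained illustration of that kind of bbox normalization (plain min-max scaling only; the actual threestudio contract_to_unisphere helper may additionally contract out-of-bbox points for unbounded scenes), one could write:

import torch
from torch import Tensor

def normalize_to_unit_cube(points: Tensor, bbox: Tensor) -> Tensor:
    # bbox is a (2, 3) tensor holding [min_corner, max_corner]; points is (..., 3).
    # Simple min-max scaling into [0, 1]^3 -- an illustration of the idea, not the library code.
    return (points - bbox[0]) / (bbox[1] - bbox[0])

bbox = torch.tensor([[-1.0, -1.0, -1.0], [1.0, 1.0, 1.0]])
pts = torch.rand(4, 3) * 2 - 1            # hypothetical query points in [-1, 1]^3
print(normalize_to_unit_cube(pts, bbox))  # all values now lie in [0, 1]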
repo_name: princeton-nlp/LLM-Shearing
file_path: llmshearing/models/composer_pythia.py
context:
[ { "identifier": "L0Module", "path": "llmshearing/models/l0_module.py", "snippet": "class L0Module(nn.Module):\n def __init__(self, cfg, device):\n super(L0Module, self).__init__()\n\n # base and target model info\n n_matrix_mlp = 2 if \"pythia\" in cfg.name else 3\n self.base_model_info = self.set_model_info(cfg, n_matrix_mlp=n_matrix_mlp) \n l0_module_cfg = cfg.l0_module\n self.target_model_info = None\n target_model_cfg = getattr(l0_module_cfg, \"target_model\", None)\n if target_model_cfg is not None:\n self.target_model_info = self.set_model_info(target_model_cfg, n_matrix_mlp=n_matrix_mlp)\n \n # l0 config\n self.pruning_modules = l0_module_cfg.pruning_modules \n self.start_sparsity = l0_module_cfg.start_sparsity \n self.lagrangian_warmup_steps = Time.from_timestring(l0_module_cfg.lagrangian_warmup_steps).value\n self.device = device\n self.eval_target_model = l0_module_cfg.get(\"eval_target_model\", True)\n \n # l0 params\n self.lambdas = {}\n self.lambdas[\"lambda_1\"] = torch.nn.Parameter(torch.tensor(0.0, device=device))\n self.lambdas[\"lambda_2\"] = torch.nn.Parameter(torch.tensor(0.0, device=device))\n self.masks = {}\n for pruning_module in self.pruning_modules:\n self.initialize_one_module(pruning_module)\n self.masks = torch.nn.ModuleDict(self.masks)\n self.lambdas = torch.nn.ParameterDict(self.lambdas)\n \n # config after initialization\n self.prunable_model_size = self.calculate_prunable_model_size(self.base_model_info)\n if target_model_cfg is not None:\n self.prunable_target_model_size = self.calculate_prunable_model_size(self.target_model_info)\n self.target_sparsity = 1 - self.prunable_target_model_size / self.prunable_model_size\n else:\n self.target_sparsity = l0_module_cfg.target_sparsity\n\n print(\"********** Initializing L0 Module **********\") \n for pruning_module in self.pruning_modules:\n print(f\"***** {pruning_module} *****\")\n print(f\"z.shape\", self.masks[pruning_module].z_loga.shape)\n print(f\"size\", self.masks[pruning_module].mask_size)\n print(f\"prunable model size: {self.prunable_model_size}\")\n \n \n def set_model_info(self, cfg, n_matrix_mlp):\n ns = NS() \n ns.hidden_size = cfg.d_model\n ns.intermediate_size = cfg.intermediate_size\n ns.num_attention_heads = cfg.n_heads\n ns.mlp_num_per_layer = 1\n ns.dim_per_head = ns.hidden_size // ns.num_attention_heads \n ns.num_layers = cfg.n_layers\n ns.vocab_size = cfg.vocab_size\n\n ns.params_per_head_layer = ns.hidden_size * ns.hidden_size * 4\n ns.params_per_head = ns.params_per_head_layer // ns.num_attention_heads\n ns.params_per_mlp_layer = ns.hidden_size * ns.intermediate_size * n_matrix_mlp\n ns.params_per_intermediate_dim = ns.params_per_mlp_layer // ns.intermediate_size\n\n ns.full_model_size = (ns.params_per_head_layer + ns.params_per_mlp_layer) * ns.num_layers\n return ns\n \n def calculate_prunable_model_size(self, ns: NS):\n prunable_mlp_size = ns.params_per_mlp_layer * ns.num_layers\n prunable_head_layer_size = ns.params_per_head_layer * ns.num_layers\n prunable_model_size = 0\n if \"hidden\" in self.pruning_modules:\n return prunable_mlp_size + prunable_head_layer_size\n if \"head_layer\" in self.pruning_modules or \"head\" in self.pruning_modules:\n prunable_model_size += prunable_head_layer_size\n if \"mlp\" in self.pruning_modules or \"intermediate\" in self.pruning_modules:\n prunable_model_size += prunable_mlp_size\n return prunable_model_size\n \n def initialize_one_module(self, module_name: str):\n func_name = f\"initialize_{module_name}\"\n try:\n method = getattr(self, 
func_name)\n except AttributeError:\n raise NotImplementedError(\"Instance `{}` does not implement `{}`\".format(self, func_name))\n method()\n \n def initialize_hidden(self):\n mask_shape = [self.base_model_info.hidden_size]\n num_params_per_mask=self.base_model_info.hidden_size * 4 + self.base_model_info.hidden_size * 4 * 2\n \n target_hidden_sparsity = None; pd=None; target_mask_size=None; \n if self.target_model_info is not None:\n target_hidden_sparsity = 1 - self.target_model_info.hidden_size / self.base_model_info.hidden_size\n target_mask_size = self.target_model_info.hidden_size\n pd = {\"lambda_1_hidden\": torch.nn.Parameter(torch.tensor(0.0, device=self.device)),\n \"lambda_2_hidden\": torch.nn.Parameter(torch.tensor(0.0, device=self.device))}\n self.lambdas.update(pd)\n \n hidden_mask = Mask(name=\"hidden\",\n mask_shape=mask_shape,\n num_params_per_mask=num_params_per_mask,\n mask_output_shape=[self.base_model_info.hidden_size],\n target_sparsity=target_hidden_sparsity,\n target_mask_size=target_mask_size,\n device=self.device,\n eval_target_model=self.eval_target_model)\n self.masks[\"hidden\"] = hidden_mask\n\n def initialize_head(self):\n mask_shape = [self.base_model_info.num_layers, self.base_model_info.num_attention_heads]\n num_params_per_mask = self.base_model_info.params_per_head\n mask_output_shape = [self.base_model_info.num_layers, 1, self.base_model_info.num_attention_heads, 1] \n \n target_head_sparsity = None; pd = {} ; target_mask_size=None; \n if self.target_model_info is not None:\n target_head_sparsity = 1 - self.target_model_info.num_attention_heads / self.base_model_info.num_attention_heads\n target_mask_size = self.target_model_info.num_attention_heads\n pd = {\"lambda_1_head\": torch.nn.Parameter(torch.tensor(0.0, device=self.device)),\n \"lambda_2_head\": torch.nn.Parameter(torch.tensor(0.0, device=self.device))}\n self.lambdas.update(pd)\n head_mask = Mask(name=\"head\",\n mask_shape=mask_shape,\n num_params_per_mask=num_params_per_mask,\n mask_output_shape=mask_output_shape,\n target_sparsity=target_head_sparsity,\n target_mask_size=target_mask_size,\n device=self.device,\n eval_target_model=self.eval_target_model)\n self.masks[\"head\"] = head_mask \n\n def initialize_qk_head_dim(self): # only campatible when target model info is available\n mask_shape = [self.base_model_info.num_layers, self.base_model_info.num_attention_heads, self.base_model_info.dim_per_head]\n num_params_per_mask = 2 * self.base_model_info.hidden_size\n mask_output_shape = [self.base_model_info.num_layers, self.base_model_info.hidden_size] \n \n target_qk_head_dim_sparsity = None; pd = {} \n if self.target_model_info is not None:\n target_qk_head_dim_sparsity = 1 - self.target_model_info.hidden_size / self.base_model_info.hidden_size\n pd = {\"lambda_1_qk_head_dim\": torch.nn.Parameter(torch.tensor(0.0, device=self.device)),\n \"lambda_2_qk_head_dim\": torch.nn.Parameter(torch.tensor(0.0, device=self.device))}\n self.lambdas.update(pd)\n qk_head_dim = Mask(name=\"qk_head_dim\",\n mask_shape=mask_shape,\n num_params_per_mask=num_params_per_mask,\n mask_output_shape=mask_output_shape,\n target_sparsity=target_qk_head_dim_sparsity,\n target_mask_size=self.target_model_info.hidden_size,\n device=self.device)\n self.masks[\"qk_head_dim\"] = qk_head_dim \n \n \n def initialize_vo_head_dim(self): # only campatible when target model info is available\n mask_shape = [self.base_model_info.num_layers, self.base_model_info.num_attention_heads, self.base_model_info.dim_per_head]\n 
num_params_per_mask = 2 * self.base_model_info.hidden_size\n mask_output_shape = [self.base_model_info.num_layers, self.base_model_info.hidden_size] \n \n target_vo_head_dim_sparsity = None; pd = {} \n if self.target_model_info is not None:\n target_vo_head_dim_sparsity = 1 - self.target_model_info.hidden_size / self.base_model_info.hidden_size\n pd = {\"lambda_1_vo_head_dim\": torch.nn.Parameter(torch.tensor(0.0, device=self.device)),\n \"lambda_2_vo_head_dim\": torch.nn.Parameter(torch.tensor(0.0, device=self.device))}\n self.lambdas.update(pd)\n vo_head_dim = Mask(name=\"vo_head_dim\",\n mask_shape=mask_shape,\n num_params_per_mask=num_params_per_mask,\n mask_output_shape=mask_output_shape,\n target_sparsity=target_vo_head_dim_sparsity,\n device=self.device)\n self.masks[\"vo_head_dim\"] = vo_head_dim \n \n def initialize_head_layer(self):\n mask_shape = [self.base_model_info.num_layers]\n num_params_per_mask=self.base_model_info.params_per_head * self.base_model_info.num_attention_heads\n mask_output_shape = [self.base_model_info.num_layers] \n \n target_head_layer_sparsity = None; pd = {}; target_mask_size=None; \n if self.target_model_info is not None:\n target_head_layer_sparsity = 1 - self.target_model_info.num_layers / self.base_model_info.num_layers\n target_mask_size = self.target_model_info.num_layers\n pd = {\"lambda_1_head_layer\": torch.nn.Parameter(torch.tensor(0.0, device=self.device)),\n \"lambda_2_head_layer\": torch.nn.Parameter(torch.tensor(0.0, device=self.device))}\n self.lambdas.update(pd)\n \n head_layer_mask = Mask(name=\"head_layer\",\n mask_shape=mask_shape,\n num_params_per_mask=num_params_per_mask,\n mask_output_shape=mask_output_shape,\n target_sparsity=target_head_layer_sparsity,\n target_mask_size=target_mask_size,\n device=self.device,\n eval_target_model=self.eval_target_model)\n self.masks[\"head_layer\"] = head_layer_mask\n \n def initialize_intermediate(self):\n mask_shape = [self.base_model_info.num_layers, self.base_model_info.intermediate_size]\n num_params_per_mask=self.base_model_info.params_per_intermediate_dim\n mask_output_shape = [self.base_model_info.num_layers, 1, 1, self.base_model_info.intermediate_size] \n \n target_int_sparsity = None; pd = {}; target_mask_size=None; \n if self.target_model_info is not None:\n target_int_sparsity = 1 - self.target_model_info.intermediate_size / self.base_model_info.intermediate_size\n target_mask_size = self.target_model_info.intermediate_size\n pd = {\"lambda_1_intermediate\": torch.nn.Parameter(torch.tensor(0.0, device=self.device)),\n \"lambda_2_intermediate\": torch.nn.Parameter(torch.tensor(0.0, device=self.device))}\n self.lambdas.update(pd)\n \n int_mask = Mask(name=\"intermediate\",\n mask_shape=mask_shape,\n num_params_per_mask=num_params_per_mask,\n mask_output_shape=mask_output_shape,\n target_sparsity=target_int_sparsity,\n target_mask_size=target_mask_size,\n device=self.device,\n eval_target_model=self.eval_target_model)\n self.masks[\"intermediate\"] = int_mask\n \n\n def initialize_mlp(self):\n mask_shape = [self.base_model_info.num_layers]\n num_params_per_mask=self.base_model_info.params_per_mlp_layer\n mask_output_shape = [self.base_model_info.num_layers] \n \n target_mlp_sparsity = None; pd = {}; target_mask_size=None; \n if self.target_model_info is not None:\n target_mlp_sparsity = 1 - self.target_model_info.num_layers / self.base_model_info.num_layers\n target_mask_size = self.target_model_info.num_layers\n pd = {\"lambda_1_mlp\": torch.nn.Parameter(torch.tensor(0.0, 
device=self.device)),\n \"lambda_2_mlp\": torch.nn.Parameter(torch.tensor(0.0, device=self.device))}\n self.lambdas.update(pd)\n \n mlp_mask = Mask(name=\"mlp\",\n mask_shape=mask_shape,\n num_params_per_mask=num_params_per_mask,\n mask_output_shape=mask_output_shape,\n target_sparsity=target_mlp_sparsity,\n target_mask_size=target_mask_size,\n device=self.device,\n eval_target_model=self.eval_target_model)\n self.masks[\"mlp\"] = mlp_mask \n\n def initialize_layer(self):\n mask_shape = [self.base_model_info.num_layers]\n num_params_per_mask=self.base_model_info.params_per_head * self.base_model_info.num_attention_heads + self.base_model_info.params_per_mlp_layer\n mask_output_shape = [self.base_model_info.num_layers] \n \n target_layer_sparsity = None; target_mask_size=None; pd = {}\n if self.target_model_info is not None:\n target_layer_sparsity = 1 - self.target_model_info.num_layers / self.base_model_info.num_layers\n target_mask_size = self.target_model_info.num_layers\n pd = {\"lambda_1_layer\": torch.nn.Parameter(torch.tensor(0.0, device=self.device)),\n \"lambda_2_layer\": torch.nn.Parameter(torch.tensor(0.0, device=self.device))}\n self.lambdas.update(pd)\n \n layer_mask = Mask(name=\"layer\",\n mask_shape=mask_shape,\n num_params_per_mask=num_params_per_mask,\n mask_output_shape=mask_output_shape,\n target_sparsity=target_layer_sparsity,\n target_mask_size=target_mask_size,\n device=self.device,\n eval_target_model=self.eval_target_model) \n self.masks[\"layer\"] = layer_mask \n \n def constrain_parameters(self):\n for key in self.masks:\n self.masks[key].constrain_parameters()\n\n def calculate_expected_score_sparsity(self):\n expected_scores = {}\n expected_sparsitys = {}\n for key in self.masks:\n score, sparsity = self.masks[key].calculate_expected_score_sparsity()\n expected_scores[key] = score\n expected_sparsitys[key] = sparsity\n return expected_scores, expected_sparsitys\n \n def transform_scores_for_head(self, expected_scores: dict):\n head_score = expected_scores[\"head\"] # 12 * 12\n\n head_layer_score = None\n if \"head_layer\" in expected_scores:\n head_layer_score = expected_scores[\"head_layer\"]\n elif \"layer\" in expected_scores:\n head_layer_score = expected_scores[\"layer\"] # 12\n if head_layer_score is not None:\n head_layer_score = head_layer_score.view(-1, 1) # 12 * 1\n \n return head_layer_score, head_score\n\n def transform_scores_for_mlp(self, expected_scores: dict):\n mlp_score = None\n if \"mlp\" in expected_scores:\n mlp_score = expected_scores[\"mlp\"] # 12\n elif \"layer\" in expected_scores:\n mlp_score = expected_scores[\"layer\"] # 12\n if mlp_score is not None:\n mlp_score = mlp_score.unsqueeze(-1)\n \n intermediate_score = expected_scores[\"intermediate\"] # 12 * 3072\n return mlp_score, intermediate_score\n\n\n def get_expected_num_params(self, expected_scores: dict): #! 
calculate the current parsity\n num_parameters = 0\n \n # 12 * 1 \n # 12 * 12\n head_layer_score, head_score = self.transform_scores_for_head(expected_scores)\n mlp_score, int_score = self.transform_scores_for_mlp(expected_scores)\n \n head_score = (head_layer_score * head_score) # 12 * 12\n int_score = (mlp_score * int_score) # 12 * 3072\n\n qk_score = None\n if \"qk_head_dim\" in expected_scores:\n qk_head_dim_score = expected_scores[\"qk_head_dim\"] # num_layer * hidden_size\n vo_head_dim_score = expected_scores[\"vo_head_dim\"] # num_layer * hidden_size\n qk_head_dim_score = qk_head_dim_score.view(qk_head_dim_score.shape[0], -1) # 12 * 768\n vo_head_dim_score = vo_head_dim_score.view(vo_head_dim_score.shape[0], -1) # 12 * 768\n head_score = torch.repeat_interleave(head_score, self.base_model_info.dim_per_head, dim=1) # 12 * 768\n\n qk_score = head_score * qk_head_dim_score # 12 * 768\n vo_score = head_score * vo_head_dim_score # 12 * 768\n \n if \"hidden\" in expected_scores:\n hidden_score = expected_scores[\"hidden\"] # 768 \n \n if qk_score is None:\n num_parameters += torch.outer(hidden_score, head_score.reshape(-1)).sum() * self.masks.head.num_params_per_mask / self.base_model_info.hidden_size # 768 * 144\n num_parameters += torch.outer(hidden_score, int_score.reshape(-1)).sum() * self.masks.intermediate.num_params_per_mask / self.base_model_info.hidden_size # 768 * 36864\n else:\n num_parameters += torch.sum(torch.matmul(hidden_score.reshape(1, -1, 1), qk_score.unsqueeze(1))) * 2 # 12 * 768 * 768\n num_parameters += torch.sum(torch.matmul(hidden_score.reshape(1, -1, 1), vo_score.unsqueeze(1))) * 2 # 12 * 768 * 768\n num_parameters += torch.sum(torch.matmul(hidden_score.reshape(1, -1, 1), int_score.unsqueeze(1))) * 3 # 12 * 768 * 3072\n else:\n num_parameters += torch.sum(head_score) * self.masks.head.num_params_per_mask\n num_parameters += torch.sum(int_score) * self.masks.intermediate.num_params_per_mask\n return num_parameters\n \n def get_target_sparsity(self, pruned_steps: int, full_sparsity: float = None):\n target_sparsity = full_sparsity\n if getattr(self, \"lagrangian_warmup_steps\", 0) > 0:\n target_sparsity = (target_sparsity - self.start_sparsity) * min(1, pruned_steps / self.lagrangian_warmup_steps) + self.start_sparsity\n return target_sparsity\n\n\n def lagrangian_regularization(self, pruned_steps: int):\n def _lag_loss(expected_sparsity: torch.tensor, target_sparsity: float, lambda_1: torch.tensor, lambda_2: torch.tensor):\n lagrangian_loss = lambda_1 * (expected_sparsity - target_sparsity) + lambda_2 * (expected_sparsity - target_sparsity) ** 2 \n lagrangian_loss = lagrangian_loss.mean()\n return lagrangian_loss\n\n target_sparsity = self.get_target_sparsity(pruned_steps, self.target_sparsity) \n expected_scores, expected_sparsitys = self.calculate_expected_score_sparsity()\n expected_size = self.get_expected_num_params(expected_scores) #! 
calculate \\bar s\n expected_sparsity = 1 - expected_size / self.prunable_model_size\n \n return_v = {}\n if self.target_model_info is None:\n lagrangian_loss = _lag_loss(expected_sparsity, target_sparsity, self.lambdas[\"lambda_1\"], self.lambdas[\"lambda_2\"])\n return_v = {\"expected_sparsity\": expected_sparsity.item(), \"target_sparsity\": target_sparsity}\n for key in expected_sparsitys:\n return_v[f\"expected_{key}_sparsity\"] = expected_sparsitys[key].mean().item()\n else:\n lagrangian_loss = 0\n return_v = {}\n for pruning_module in self.pruning_modules:\n ts = self.get_target_sparsity(pruned_steps, self.masks[pruning_module].target_sparsity)\n expected_ts = expected_sparsitys[pruning_module] \n lagrangian_loss += _lag_loss(expected_ts, ts, self.lambdas[f\"lambda_1_{pruning_module}\"], self.lambdas[f\"lambda_2_{pruning_module}\"])\n expected_ts = expected_ts.mean().item()\n return_v.update({\"expected_{}_sparsity\".format(pruning_module): expected_ts, \"target_{}_sparsity\".format(pruning_module): ts})\n return_v[\"expected_sparsity\"] = expected_sparsity.item()\n return_v[\"target_sparsity\"] = target_sparsity\n\n\n # return_v might not matter\n return lagrangian_loss, return_v\n \n def forward(self, calculate_lagrangian: bool = False, pruned_steps: int = 0):\n self.constrain_parameters()\n if calculate_lagrangian:\n return self.lagrangian_regularization(pruned_steps)\n \n zs = {f\"{pruning_module}_z\": [] for pruning_module in self.pruning_modules}\n \n if \"layer\" in self.pruning_modules:\n zs.pop(\"layer_z\")\n zs[\"mlp_z\"] = []\n zs[\"head_layer_z\"] = []\n \n if self.training:\n for pruning_module in self.pruning_modules:\n mask = self.masks[pruning_module]\n z = mask.sample_z()\n zs[f\"{pruning_module}_z\"] = z\n else: # removed layerwise! \n with torch.no_grad():\n for pruning_module in self.pruning_modules:\n mask = self.masks[pruning_module]\n z = mask.deterministic_z()\n zs[f\"{pruning_module}_z\"] = z\n if \"layer_z\" in zs:\n zs[\"mlp_z\"] = zs.pop(\"layer_z\")\n zs[\"head_layer_z\"] = zs[\"mlp_z\"]\n return zs " }, { "identifier": "ComposerMosaicLlama", "path": "llmshearing/models/composer_llama.py", "snippet": "class ComposerMosaicLlama(ComposerModel):\n \"\"\" Llama model with the Composer model interface. 
\"\"\"\n def __init__(self, cfg):\n super().__init__()\n self.model = LlamaModel(cfg)\n self.ref_model = None\n self.num_fwd_flops = self._compute_num_fwd_flops()\n self.train_metrics = {\n 'LanguageCrossEntropy': LanguageCrossEntropy(),\n 'Perplexity': LanguagePerplexity(),\n }\n self.eval_metrics = {\n 'LanguageCrossEntropy': LanguageCrossEntropy(),\n 'Perplexity': LanguagePerplexity(),\n }\n\n self.set_names = getattr(cfg, \"set_names\", None)\n if self.set_names is not None:\n self.set_name_to_id = {set_name: i for i, set_name in enumerate(self.set_names)}\n self.set_id_to_name = {i: set_name for i, set_name in enumerate(self.set_names)}\n \n for set_name in self.set_names:\n # add train and eval metrics for each set\n self.train_metrics[f'{set_name}_LanguageCrossEntropy'] = DomainLanguageCrossEntropy(set_name=set_name)\n self.eval_metrics[f'{set_name}_LanguageCrossEntropy'] = DomainLanguageCrossEntropy(set_name=set_name)\n self.train_metrics[f'{set_name}_count'] = DomainCount(set_name=set_name, set_index=self.set_name_to_id[set_name]) \n\n def prune_params(self, zs=None):\n self.model.prune_params(zs)\n \n def get_targets(self, batch):\n targets = torch.roll(batch['labels'], shifts=-1)\n targets[:, -1] = -100\n return targets\n \n def forward(self, batch):\n input_ids = batch['input_ids']\n key_padding_mask = batch['attention_mask'].bool(\n ) if 'attention_mask' in batch else None\n pruned_steps = batch.get('pruned_steps', None)\n if pruned_steps is not None:\n pruned_steps = pruned_steps[0].item()\n zs = {key: batch[key] for key in batch if \"_z\" in key}\n model_output = self.model(input_ids=input_ids, key_padding_mask=key_padding_mask, pruned_steps=pruned_steps, **zs)\n return model_output\n\n def eval_forward(self, batch, outputs=None):\n return outputs if outputs is not None else self.forward(batch)\n\n def loss(self, outputs, batch):\n logits = outputs[\"logits\"]\n l0_output = outputs[\"l0_output\"]\n targets = self.get_targets(batch)\n\n loss = F.cross_entropy(logits.view(-1, logits.size(-1)),\n targets.view(-1),\n ignore_index=-100)\n return_loss = {\"ce_loss\": loss}\n if l0_output is not None:\n lag_loss = l0_output[0]\n return_loss[\"lag_loss\"] = lag_loss\n return_loss[\"total\"] = sum(return_loss.values())\n return return_loss\n\n def get_metrics(self, is_train=False):\n return self.train_metrics if is_train else self.eval_metrics\n\n def update_metric(self, batch, outputs, metric) -> None:\n logits = outputs[\"logits\"]\n if isinstance(metric, DomainLanguageCrossEntropy):\n targets = self.get_targets(batch)\n set_id = self.set_name_to_id[metric.set_name]\n targets[batch[\"set\"] != set_id] = -100\n metric.update(logits, targets)\n elif isinstance(metric, DomainCount):\n with torch.inference_mode():\n idx = None\n selected_sets = batch['set']\n metric.update(selected_sets, idx)\n else:\n logits = logits.view(-1, logits.size(-1))\n targets = self.get_targets(batch).view(-1)\n metric.update(logits, targets)\n\n def add_eval_metrics(self, evaluator):\n evaluator_metrics = {\n m: METRIC_DEFAULT_CTORS[m]() for m in evaluator.metric_names\n }\n if self.eval_metrics is not None:\n self.eval_metrics.update(evaluator_metrics)\n else:\n self.eval_metrics = evaluator_metrics\n\n def _compute_num_fwd_flops(self):\n # Might not be correct for LLaMA structures\n n_params = sum(p.numel() for p in self.parameters())\n # the number of paramters is approximately the number of multiply-accumulates (MAC) in the network\n # each MAC has 2 FLOPs - we multiply by 2 ie 2 * n_param\n # this gets 
us FLOPs / token\n params_flops_per_token = 2 * n_params\n params_flops_per_seq = params_flops_per_token * self.model.cfg.max_seq_len\n # there are 2 FLOPS per mac; there is A=Q*K^T and out=A*V ops (ie mult by 2)\n attn_flops_per_seq = self.model.cfg.n_layers * 2 * 2 * (\n self.model.cfg.d_model * (self.model.cfg.max_seq_len**2))\n return params_flops_per_seq + attn_flops_per_seq\n\n def flops_per_batch(self, batch):\n # Note: this computation does not take into account padding, and assumes\n # that the dataset has been constructed without padding. Additionally, we\n # assume the backward pass is approximately 2x the forward pass\n return self.num_fwd_flops * 3 * batch['input_ids'].shape[0]\n\n def resize_token_embeddings(self, new_num_tokens: Optional[int] = None) -> nn.Embedding:\n if new_num_tokens is not None:\n self.model._resize_token_embeddings(new_num_tokens)" }, { "identifier": "prepare_decoder_attention_mask", "path": "llmshearing/models/composer_llama.py", "snippet": "def prepare_decoder_attention_mask(input_shape, inputs_embeds):\n # create causal mask\n # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]\n combined_attention_mask = None\n if input_shape[-1] > 1:\n combined_attention_mask = _make_causal_mask(input_shape, inputs_embeds.dtype).to(inputs_embeds.device)\n\n return combined_attention_mask" }, { "identifier": "turn_head_z", "path": "llmshearing/models/composer_llama.py", "snippet": "def turn_head_z(head_z, head_layer_z):\n head_z = head_z.squeeze().clone()\n if head_layer_z is not None:\n head_z *= head_layer_z\n to_prune_heads = torch.where(head_z == 0)[0].view(-1).tolist()\n return to_prune_heads" }, { "identifier": "turn_mlp_z", "path": "llmshearing/models/composer_llama.py", "snippet": "def turn_mlp_z(intermediate_z, mlp_z):\n intermediate_z_layer = intermediate_z.squeeze().clone()\n if mlp_z is not None:\n intermediate_z_layer *= mlp_z\n keep_intermediate_dims = torch.where(intermediate_z_layer != 0)[0].tolist()\n return keep_intermediate_dims " }, { "identifier": "normal_attn_fn", "path": "llmshearing/models/composer_llama.py", "snippet": "def normal_attn_fn(\n query,\n key, \n value,\n attention_mask=None,\n head_z=None\n):\n bsz, n_heads, q_len, head_dim = query.shape\n dim = n_heads * head_dim\n attn_weights = torch.matmul(query, key.transpose(2, 3)) / math.sqrt(head_dim)\n attn_weights = attn_weights + attention_mask\n attn_weights = torch.max(attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min))\n\n # upcast attention to fp32\n attn_weights = torch.nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)\n attn_output = torch.matmul(attn_weights, value) # (bsz, n_heads, q_len, head_dim)\n if head_z is not None:\n attn_output *= head_z.unsqueeze(-1)\n attn_output = attn_output.transpose(1, 2)\n attn_output = attn_output.reshape(bsz, q_len, dim)\n return attn_output" }, { "identifier": "flash_attn_fn", "path": "llmshearing/models/composer_llama.py", "snippet": "def flash_attn_fn(\n query,\n key,\n value,\n softmax_scale=None,\n attn_bias=None,\n query_padding_mask=None,\n key_padding_mask=None,\n is_causal=False,\n dropout_p=0.0,\n training=False,\n needs_weights=False,\n head_z=None,\n \n):\n try:\n from flash_attn import bert_padding # type: ignore\n from flash_attn import flash_attn_interface # type: ignore\n except ImportError as e:\n raise e\n\n # check_valid_inputs(query, key, value)\n\n if attn_bias is not None:\n raise NotImplementedError(f'attn_bias not implemented for flash attn.')\n\n batch_size, seqlen = 
query.shape[:2]\n\n if query_padding_mask is None:\n query_padding_mask = torch.ones((batch_size, seqlen), dtype=torch.bool, device=query.device)\n if key_padding_mask is None:\n key_padding_mask = torch.ones((batch_size, seqlen), dtype=torch.bool, device=key.device)\n\n query_unpad, indices_q, cu_seqlens_q, max_seqlen_q = bert_padding.unpad_input(\n query, query_padding_mask)\n # query_unpad = rearrange(query_unpad, 'nnz (h d) -> nnz h d', h=n_heads)\n\n key_unpad, _, cu_seqlens_k, max_seqlen_k = bert_padding.unpad_input(\n key, key_padding_mask)\n # key_unpad = rearrange(key_unpad, 'nnz (h d) -> nnz h d', h=n_heads)\n\n value_unpad, _, _, _ = bert_padding.unpad_input(value, key_padding_mask)\n # value_unpad = rearrange(value_unpad, 'nnz (h d) -> nnz h d', h=n_heads)\n\n dropout_p = dropout_p if training else 0.0\n \n output_unpad = flash_attn_interface.flash_attn_unpadded_func(\n query_unpad,\n key_unpad,\n value_unpad,\n cu_seqlens_q,\n cu_seqlens_k,\n max_seqlen_q,\n max_seqlen_k,\n dropout_p,\n softmax_scale=softmax_scale,\n causal=is_causal,\n return_attn_probs=needs_weights)\n\n if head_z is not None:\n output_unpad = output_unpad * head_z # 1 * h * 1\n output = bert_padding.pad_input(rearrange(output_unpad, 'nnz h d -> nnz (h d)'), indices_q, batch_size, seqlen)\n return output, None" } ]
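The L0Module snippet above ties pruning to a Lagrangian term of the form lambda_1 * (s - t) + lambda_2 * (s - t)^2, where s is the expected sparsity computed from the masks and t is the (warmup-scheduled) target sparsity. Below is a self-contained restatement of that penalty with made-up numbers purely for illustration; in the repo the two lambdas are learned nn.Parameters and s comes from the mask scores:

import torch

def lagrangian_penalty(expected_sparsity, target_sparsity, lambda_1, lambda_2):
    # Linear plus quadratic penalty on the gap between expected and target sparsity,
    # mirroring L0Module._lag_loss above.
    gap = expected_sparsity - target_sparsity
    return (lambda_1 * gap + lambda_2 * gap ** 2).mean()

loss = lagrangian_penalty(torch.tensor(0.30), 0.50, torch.tensor(1.0), torch.tensor(2.0))
print(loss)  # 1.0 * (-0.2) + 2.0 * 0.04 = -0.12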
import_statement:

import math
import torch
import torch.nn as nn
from typing import List, Optional, Tuple
from einops import rearrange
from omegaconf import DictConfig
from torch.nn import functional as F
from transformers.pytorch_utils import (find_pruneable_heads_and_indices,
                                        prune_linear_layer)
from llmshearing.models.l0_module import L0Module
from llmshearing.models.composer_llama import ComposerMosaicLlama, prepare_decoder_attention_mask, turn_head_z, turn_mlp_z, normal_attn_fn, flash_attn_fn
from transformers.models.gpt_neox.modeling_gpt_neox import apply_rotary_pos_emb
token_num: 10,801
cropped_code:

    def param_init_fn(self, module):
        pass

    def fsdp_wrap_fn(self, module):
        return isinstance(module, PythiaBlock)

    # Activation Checkpointing
    def activation_checkpointing_fn(self, module):
        return isinstance(module, PythiaBlock)


class PythiaBlock(nn.Module):
    def __init__(self, cfg: DictConfig, device: Optional[str] = None):
        super().__init__()
        layernorm_class = CoFiLayerNorm  # TODO: CoFiLayerNorm,RMSLayerNorm
        self.ln_1 = layernorm_class(cfg.d_model, eps=cfg.layer_norm_eps, device=device)
        self.attn = PythiaAttention(cfg, device)
        self.ln_2 = layernorm_class(cfg.d_model, eps=cfg.layer_norm_eps, device=device)
        self.mlp = PythiaMLP(cfg, device)
        self.use_parallel_residual = cfg.get('use_parallel_residual', False)  # TODO: add to config

    def prune_params(self, zs_block):
        self.attn.prune_params(zs_block)
        self.mlp.prune_params(zs_block)
        if self.attn.query_key_value is None:
            self.ln_1 = None
        if self.mlp.up_proj is None:
            self.ln_2 = None
        if "hidden_z" in zs_block:
            hidden_z = zs_block["hidden_z"]
            if self.ln_1 is not None:
                self.ln_1.prune_params(hidden_z)
            if self.ln_2 is not None:
                self.ln_2.prune_params(hidden_z)

    def forward(
        self, x: torch.Tensor, past_key_value: Optional[Tuple[torch.Tensor]] = None,
        attn_bias: Optional[torch.Tensor] = None, key_padding_mask: Optional[torch.ByteTensor] = None,
        is_causal: bool = True, attention_mask: Optional[torch.Tensor] = None, retain_grad: bool = False,
        head_z: Optional[torch.Tensor] = None, head_layer_z: Optional[torch.Tensor] = None,
        intermediate_z: Optional[torch.Tensor] = None, mlp_z: Optional[torch.Tensor] = None,
        hidden_z: Optional[torch.Tensor] = None, qk_head_dim_z: Optional[torch.Tensor] = None,
        vo_head_dim_z: Optional[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor]]]:
        if self.ln_1 is not None:
            a = self.ln_1(x, hidden_z=hidden_z)
            attn_output, _, past_key_value = self.attn(
                a, past_key_value=past_key_value, attn_bias=attn_bias,
                key_padding_mask=key_padding_mask, is_causal=is_causal, attention_mask=attention_mask,
                retain_grad=retain_grad, head_z=head_z, head_layer_z=head_layer_z, hidden_z=hidden_z,
                qk_head_dim_z=qk_head_dim_z, vo_head_dim_z=vo_head_dim_z)
        else:
            attn_output = 0

        if self.use_parallel_residual:
            # pseudocode:
            # x = x + attn(ln1(x)) + mlp(ln2(x))
            if self.ln_2 is not None:
                b = self.ln_2(x, hidden_z=hidden_z)
                mlp_output = self.mlp(b, retain_grad, intermediate_z, mlp_z, hidden_z)
                x = mlp_output + attn_output + x
            else:
                x = attn_output + x
        else:
            # pseudocode:
            # x = x + attn(ln1(x))
            # x = x + mlp(ln2(x))
            if self.ln_2 is not None:
                attn_output = x + attn_output
                hidden_states = self.ln_2(attn_output, hidden_z=hidden_z)
                mlp_output = self.mlp(hidden_states, retain_grad, intermediate_z, mlp_z, hidden_z)
                x = mlp_output + attn_output
            else:
                x = x + attn_output

        return x, past_key_value


class PythiaAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, cfg: DictConfig, device: Optional[str] = None):
        super().__init__()
        self.attn_impl = cfg.get('attn_impl')
        self.d_model = cfg.d_model
        self.n_heads = cfg.n_heads
        self.all_head_size = cfg.d_model
        self.head_dim = self.d_model // self.n_heads
        self.pruned_heads = set()

        self.softmax_scale = cfg.get('softmax_scale')
        if self.softmax_scale is None:
            self.softmax_scale = 1 / math.sqrt(self.d_model / self.n_heads)
        self.attn_dropout_p = cfg.get('attn_pdrop')

        # self.Wqkv = nn.Linear(self.d_model, 3 * self.d_model, device=device, bias=False)
        # for param init fn; enables shape based init of fused layers
        # fuse_splits = (cfg.d_model, 2 * cfg.d_model)
        # self.Wqkv._fused = (0, fuse_splits)  # type: ignore
        self.query_key_value = nn.Linear(self.d_model, 3 * self.d_model, device=device, bias=True)
        fuse_splits = (cfg.d_model, 2 * cfg.d_model)
        self.query_key_value._fused = (0, fuse_splits)
all_code:

class ComposerMosaicPythia(ComposerMosaicLlama):
    def __init__(self, cfg):
        super().__init__(cfg)
        self.model = PythiaModel(cfg)


class CoFiLayerNorm(torch.nn.LayerNorm):
    def __init__(self, normalized_shape, eps: float = 1e-5, elementwise_affine: bool = True, device=None) -> None:
        super().__init__(normalized_shape, eps, elementwise_affine, device)

    def forward(self, input, hidden_z=None):
        if hidden_z is not None:
            remaining_index = torch.where(~hidden_z.eq(0))[0]
            compressed_input = torch.index_select(input, dim=-1, index=remaining_index)
            compressed_weight = self.weight[remaining_index]
            compressed_bias = self.bias[remaining_index]
            normalized_shape = len(remaining_index)
            normed_input = F.layer_norm(
                compressed_input, [normalized_shape], compressed_weight, compressed_bias, self.eps)
            output = input.clone()
            normed_input = normed_input.to(output.dtype)
            output[..., remaining_index] = normed_input
        else:
            output = F.layer_norm(input, self.normalized_shape, self.weight, self.bias, self.eps)
        return output

    def prune_params(self, hidden_z):
        remaining_index = torch.where(~hidden_z.eq(0))[0]
        # self.weight = torch.nn.Parameter(self.weight.data.mul(hidden_z.squeeze())[remaining_index])
        self.weight = torch.nn.parameter.Parameter(self.weight.index_select(0, remaining_index))
        self.bias = torch.nn.parameter.Parameter(self.bias.index_select(0, remaining_index))
        self.normalized_shape = (len(remaining_index),)


class PythiaEmbedding(nn.Embedding):
    def forward(self, input, hidden_z=None):
        embeddings = super().forward(input)
        if hidden_z is not None:
            embeddings = embeddings.mul(hidden_z)
        return embeddings

    def prune_params(self, hidden_z):
        remaining_index = torch.where(~hidden_z.eq(0))[0]
        self.weight.data = self.weight.data.mul(hidden_z)
        self.weight = torch.nn.parameter.Parameter(self.weight.index_select(1, remaining_index).clone())
        self.embedding_dim = len(remaining_index)
        print(f" Embedding: {len(hidden_z)} -> {len(remaining_index)}")


class PythiaModel(nn.Module):
    def __init__(self, cfg: DictConfig):
        super().__init__()
        print(f'Tried to build Pythia model with cfg.name={cfg.name}')
        self.cfg = cfg

        ### added ###
        self.l0_module = None
        if getattr(self.cfg, "l0_module", None) is not None:
            self.l0_module = L0Module(self.cfg, device=cfg.init_device)
        #############

        layernorm_class = CoFiLayerNorm
        self.attn_impl = cfg.attn_impl

        self.embedding_fraction = cfg.get('embedding_fraction', 1)
        assert 0 < self.embedding_fraction <= 1, 'model.embedding_fraction must be between 0 (exclusive) and 1 (inclusive)!'

        self.transformer = nn.ModuleDict({
            "wte": PythiaEmbedding(cfg.vocab_size, cfg.d_model, device=cfg.init_device),
        })
        self.transformer.update({
            'blocks': nn.ModuleList([PythiaBlock(cfg, device=cfg.init_device) for _ in range(cfg.n_layers)])
        })
        self.transformer.update({
            "output": nn.Linear(cfg.d_model, cfg.vocab_size, device=cfg.init_device, bias=False),
        })
        self.transformer.update({
            "ln_f": layernorm_class(cfg.d_model, eps=cfg.layer_norm_eps, device=cfg.init_device),  # TODO: add to config
        })

        self.is_causal = True
        if cfg.get('verbose') and cfg.get('verbose') > 2:
            print(self)

    def prune_params(self, zs=None):  # TODO
        if zs is None:
            self.l0_module.eval()
            zs = self.l0_module(calculate_lagrangian=False)
        # wte as well :)
        # ln_f if hidden states are to be pruned
        if "hidden_z" in zs:
            hidden_z = zs["hidden_z"]
            remaining_index = torch.where(~hidden_z.eq(0))[0]
            self.transformer.ln_f.prune_params(hidden_z)
            self.transformer.wte.weight.data = self.transformer.wte.weight.data.mul(hidden_z)
            self.transformer.wte.weight = torch.nn.parameter.Parameter(
                self.transformer.wte.weight.index_select(1, remaining_index).clone())
            self.transformer.wte.embedding_dim = len(remaining_index)
            # self.transformer.output.weight.data = self.transformer.output.weight.data.mul(hidden_z)
            half = self.transformer.output.weight.data.dtype == torch.float16
            self.transformer.output = prune_linear_layer(self.transformer.output, remaining_index, dim=1)
            if half:
                self.transformer.output = self.transformer.output.half()

        for i, block in enumerate(self.transformer.blocks):
            zs_block = self.get_zs_block(zs, i)
            block.prune_params(zs_block)

    def get_zs_block(self, zs, block_idx):
        zs_block = {}
        if zs is not None:
            for key in zs:
                if key == "hidden_z":
                    zs_block["hidden_z"] = zs["hidden_z"]
                else:
                    zs_block[key] = zs[key][block_idx]
        return zs_block

    def forward(
        self,
        input_ids: torch.LongTensor,
        key_padding_mask: Optional[torch.ByteTensor] = None,
        past_key_values: Optional[List[Tuple[torch.FloatTensor]]] = None,
        pruned_steps: int = 0,
        retain_grad: bool = False,
        **zs,
    ):
        S = input_ids.size(1)
        assert S <= self.cfg.max_seq_len, f"Sequence length ({S}) exceeds model maximum sequence length ({self.cfg.max_seq_len})!"

        tok_emb = self.transformer.wte(input_ids)
        if "hidden_z" in zs:
            tok_emb = tok_emb.mul(zs["hidden_z"])
        x = tok_emb

        attn_bias = None  # only consider the flash attention case
        attention_mask = prepare_decoder_attention_mask((tok_emb.size(0), tok_emb.size(1)), tok_emb)

        l0_output = None
        if self.l0_module is not None:
            assert zs == {}, "zs should be empty when using L0Module"
            zs = self.l0_module(calculate_lagrangian=False, pruned_steps=pruned_steps)

        for b_idx, block in enumerate(self.transformer.blocks):
            zs_block = self.get_zs_block(zs, b_idx)
            past_key_value = past_key_values[b_idx] if past_key_values is not None else None
            x, past_key_value = block(
                x, past_key_value=past_key_value, attn_bias=attn_bias, key_padding_mask=key_padding_mask,
                is_causal=self.is_causal, attention_mask=attention_mask, retain_grad=retain_grad, **zs_block
            )
            if past_key_values is not None:
                past_key_values[b_idx] = past_key_value

        x = self.transformer.ln_f(x, hidden_z=zs.get("hidden_z", None))
        logits = self.transformer.output(x)

        if self.l0_module is not None:
            l0_output = self.l0_module(calculate_lagrangian=True, pruned_steps=pruned_steps)

        return {"logits": logits, "l0_output": l0_output, "zs": zs}

    def param_init_fn(self, module):
        pass

    def fsdp_wrap_fn(self, module):
        return isinstance(module, PythiaBlock)

    # Activation Checkpointing
    def activation_checkpointing_fn(self, module):
        return isinstance(module, PythiaBlock)


class PythiaBlock(nn.Module):
    def __init__(self, cfg: DictConfig, device: Optional[str] = None):
        super().__init__()
        layernorm_class = CoFiLayerNorm  # TODO: CoFiLayerNorm,RMSLayerNorm
        self.ln_1 = layernorm_class(cfg.d_model, eps=cfg.layer_norm_eps, device=device)
        self.attn = PythiaAttention(cfg, device)
        self.ln_2 = layernorm_class(cfg.d_model, eps=cfg.layer_norm_eps, device=device)
        self.mlp = PythiaMLP(cfg, device)
        self.use_parallel_residual = cfg.get('use_parallel_residual', False)  # TODO: add to config

    def prune_params(self, zs_block):
        self.attn.prune_params(zs_block)
        self.mlp.prune_params(zs_block)
        if self.attn.query_key_value is None:
            self.ln_1 = None
        if self.mlp.up_proj is None:
            self.ln_2 = None
        if "hidden_z" in zs_block:
            hidden_z = zs_block["hidden_z"]
            if self.ln_1 is not None:
                self.ln_1.prune_params(hidden_z)
            if self.ln_2 is not None:
                self.ln_2.prune_params(hidden_z)

    def forward(
        self, x: torch.Tensor, past_key_value: Optional[Tuple[torch.Tensor]] = None,
        attn_bias: Optional[torch.Tensor] = None, key_padding_mask: Optional[torch.ByteTensor] = None,
        is_causal: bool = True, attention_mask: Optional[torch.Tensor] = None, retain_grad: bool = False,
        head_z: Optional[torch.Tensor] = None, head_layer_z: Optional[torch.Tensor] = None,
        intermediate_z: Optional[torch.Tensor] = None, mlp_z: Optional[torch.Tensor] = None,
        hidden_z: Optional[torch.Tensor] = None, qk_head_dim_z: Optional[torch.Tensor] = None,
        vo_head_dim_z: Optional[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, Optional[Tuple[torch.Tensor]]]:
        if self.ln_1 is not None:
            a = self.ln_1(x, hidden_z=hidden_z)
            attn_output, _, past_key_value = self.attn(
                a, past_key_value=past_key_value, attn_bias=attn_bias,
                key_padding_mask=key_padding_mask, is_causal=is_causal, attention_mask=attention_mask,
                retain_grad=retain_grad, head_z=head_z, head_layer_z=head_layer_z, hidden_z=hidden_z,
                qk_head_dim_z=qk_head_dim_z, vo_head_dim_z=vo_head_dim_z)
        else:
            attn_output = 0

        if self.use_parallel_residual:
            # pseudocode:
            # x = x + attn(ln1(x)) + mlp(ln2(x))
            if self.ln_2 is not None:
                b = self.ln_2(x, hidden_z=hidden_z)
                mlp_output = self.mlp(b, retain_grad, intermediate_z, mlp_z, hidden_z)
                x = mlp_output + attn_output + x
            else:
                x = attn_output + x
        else:
            # pseudocode:
            # x = x + attn(ln1(x))
            # x = x + mlp(ln2(x))
            if self.ln_2 is not None:
                attn_output = x + attn_output
                hidden_states = self.ln_2(attn_output, hidden_z=hidden_z)
                mlp_output = self.mlp(hidden_states, retain_grad, intermediate_z, mlp_z, hidden_z)
                x = mlp_output + attn_output
            else:
                x = x + attn_output

        return x, past_key_value


class PythiaAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, cfg: DictConfig, device: Optional[str] = None):
        super().__init__()
        self.attn_impl = cfg.get('attn_impl')
        self.d_model = cfg.d_model
        self.n_heads = cfg.n_heads
        self.all_head_size = cfg.d_model
        self.head_dim = self.d_model // self.n_heads
        self.pruned_heads = set()

        self.softmax_scale = cfg.get('softmax_scale')
        if self.softmax_scale is None:
            self.softmax_scale = 1 / math.sqrt(self.d_model / self.n_heads)
        self.attn_dropout_p = cfg.get('attn_pdrop')

        # self.Wqkv = nn.Linear(self.d_model, 3 * self.d_model, device=device, bias=False)
        # for param init fn; enables shape based init of fused layers
        # fuse_splits = (cfg.d_model, 2 * cfg.d_model)
        # self.Wqkv._fused = (0, fuse_splits)  # type: ignore
        self.query_key_value = nn.Linear(self.d_model, 3 * self.d_model, device=device, bias=True)
        fuse_splits = (cfg.d_model, 2 * cfg.d_model)
        self.query_key_value._fused = (0, fuse_splits)
next_line:

self.attn_fn = flash_attn_fn if self.attn_impl == 'flash' else normal_attn_fn
gold_snippet_index: 6
created_at: 2023-10-16 12:26:08+00:00
level: 16k
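Both attention paths selected by the next_line above consume an additive causal mask; in the repo it is built by prepare_decoder_attention_mask/_make_causal_mask (see the context snippet). The following is only a minimal stand-alone sketch of such a mask, not the project's helper:

import torch

def make_causal_mask(seq_len: int, dtype: torch.dtype = torch.float32) -> torch.Tensor:
    # Allowed positions get 0; future positions get a very large negative bias so
    # softmax assigns them ~0 weight.
    mask = torch.full((seq_len, seq_len), torch.finfo(dtype).min, dtype=dtype)
    mask = torch.triu(mask, diagonal=1)   # keep the bias strictly above the diagonal
    return mask[None, None, :, :]         # broadcastable over (batch, heads)

print(make_causal_mask(4))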
repo_name: hkchengrex/Cutie
file_path: cutie/inference/inference_core.py
context:
[ { "identifier": "MemoryManager", "path": "cutie/inference/memory_manager.py", "snippet": "class MemoryManager:\n \"\"\"\n Manages all three memory stores and the transition between working/long-term memory\n \"\"\"\n def __init__(self, cfg: DictConfig, object_manager: ObjectManager):\n self.object_manager = object_manager\n self.sensory_dim = cfg.model.sensory_dim\n self.top_k = cfg.top_k\n self.chunk_size = cfg.chunk_size\n\n self.save_aux = cfg.save_aux\n\n self.use_long_term = cfg.use_long_term\n self.count_long_term_usage = cfg.long_term.count_usage\n # subtract 1 because the first-frame is now counted as \"permanent memory\"\n # and is not counted towards max_mem_frames\n # but we want to keep the hyperparameters consistent as before for the same behavior\n if self.use_long_term:\n self.max_mem_frames = cfg.long_term.max_mem_frames - 1\n self.min_mem_frames = cfg.long_term.min_mem_frames - 1\n self.num_prototypes = cfg.long_term.num_prototypes\n self.max_long_tokens = cfg.long_term.max_num_tokens\n self.buffer_tokens = cfg.long_term.buffer_tokens\n else:\n self.max_mem_frames = cfg.max_mem_frames - 1\n\n # dimensions will be inferred from input later\n self.CK = self.CV = None\n self.H = self.W = None\n\n # The sensory memory is stored as a dictionary indexed by object ids\n # each of shape bs * C^h * H * W\n self.sensory = {}\n\n # a dictionary indexed by object ids, each of shape bs * T * Q * C\n self.obj_v = {}\n\n self.work_mem = KeyValueMemoryStore(save_selection=self.use_long_term,\n save_usage=self.use_long_term)\n if self.use_long_term:\n self.long_mem = KeyValueMemoryStore(save_usage=self.count_long_term_usage)\n\n self.config_stale = True\n self.engaged = False\n\n def update_config(self, cfg: DictConfig) -> None:\n self.config_stale = True\n self.top_k = cfg['top_k']\n\n assert self.use_long_term == cfg.use_long_term, 'cannot update this'\n assert self.count_long_term_usage == cfg.long_term.count_usage, 'cannot update this'\n\n self.use_long_term = cfg.use_long_term\n self.count_long_term_usage = cfg.long_term.count_usage\n if self.use_long_term:\n self.max_mem_frames = cfg.long_term.max_mem_frames - 1\n self.min_mem_frames = cfg.long_term.min_mem_frames - 1\n self.num_prototypes = cfg.long_term.num_prototypes\n self.max_long_tokens = cfg.long_term.max_num_tokens\n self.buffer_tokens = cfg.long_term.buffer_tokens\n else:\n self.max_mem_frames = cfg.max_mem_frames - 1\n\n def _readout(self, affinity, v) -> torch.Tensor:\n # affinity: bs*N*HW\n # v: bs*C*N or bs*num_objects*C*N\n # returns bs*C*HW or bs*num_objects*C*HW\n if len(v.shape) == 3:\n # single object\n return v @ affinity\n else:\n bs, num_objects, C, N = v.shape\n v = v.view(bs, num_objects * C, N)\n out = v @ affinity\n return out.view(bs, num_objects, C, -1)\n\n def _get_mask_by_ids(self, mask: torch.Tensor, obj_ids: List[int]) -> torch.Tensor:\n # -1 because the mask does not contain the background channel\n return mask[:, [self.object_manager.find_tmp_by_id(obj) - 1 for obj in obj_ids]]\n\n def _get_sensory_by_ids(self, obj_ids: List[int]) -> torch.Tensor:\n return torch.stack([self.sensory[obj] for obj in obj_ids], dim=1)\n\n def _get_object_mem_by_ids(self, obj_ids: List[int]) -> torch.Tensor:\n return torch.stack([self.obj_v[obj] for obj in obj_ids], dim=1)\n\n def _get_visual_values_by_ids(self, obj_ids: List[int]) -> torch.Tensor:\n # All the values that the object ids refer to should have the same shape\n value = torch.stack([self.work_mem.value[obj] for obj in obj_ids], dim=1)\n if self.use_long_term 
and obj_ids[0] in self.long_mem.value:\n lt_value = torch.stack([self.long_mem.value[obj] for obj in obj_ids], dim=1)\n value = torch.cat([lt_value, value], dim=-1)\n\n return value\n\n def read(self, pix_feat: torch.Tensor, query_key: torch.Tensor, selection: torch.Tensor,\n last_mask: torch.Tensor, network: CUTIE) -> Dict[int, torch.Tensor]:\n \"\"\"\n Read from all memory stores and returns a single memory readout tensor for each object\n\n pix_feat: (1/2) x C x H x W\n query_key: (1/2) x C^k x H x W\n selection: (1/2) x C^k x H x W\n last_mask: (1/2) x num_objects x H x W (at stride 16)\n return a dict of memory readouts, indexed by object indices. Each readout is C*H*W\n \"\"\"\n h, w = pix_feat.shape[-2:]\n bs = pix_feat.shape[0]\n assert query_key.shape[0] == bs\n assert selection.shape[0] == bs\n assert last_mask.shape[0] == bs\n\n query_key = query_key.flatten(start_dim=2) # bs*C^k*HW\n selection = selection.flatten(start_dim=2) # bs*C^k*HW\n \"\"\"\n Compute affinity and perform readout\n \"\"\"\n all_readout_mem = {}\n buckets = self.work_mem.buckets\n for bucket_id, bucket in buckets.items():\n if self.use_long_term and self.long_mem.engaged(bucket_id):\n # Use long-term memory\n long_mem_size = self.long_mem.size(bucket_id)\n memory_key = torch.cat([self.long_mem.key[bucket_id], self.work_mem.key[bucket_id]],\n -1)\n shrinkage = torch.cat(\n [self.long_mem.shrinkage[bucket_id], self.work_mem.shrinkage[bucket_id]], -1)\n\n similarity = get_similarity(memory_key, shrinkage, query_key, selection)\n affinity, usage = do_softmax(similarity,\n top_k=self.top_k,\n inplace=True,\n return_usage=True)\n \"\"\"\n Record memory usage for working and long-term memory\n \"\"\"\n # ignore the index return for long-term memory\n work_usage = usage[:, long_mem_size:]\n self.work_mem.update_bucket_usage(bucket_id, work_usage)\n\n if self.count_long_term_usage:\n # ignore the index return for working memory\n long_usage = usage[:, :long_mem_size]\n self.long_mem.update_bucket_usage(bucket_id, long_usage)\n else:\n # no long-term memory\n memory_key = self.work_mem.key[bucket_id]\n shrinkage = self.work_mem.shrinkage[bucket_id]\n similarity = get_similarity(memory_key, shrinkage, query_key, selection)\n\n if self.use_long_term:\n affinity, usage = do_softmax(similarity,\n top_k=self.top_k,\n inplace=True,\n return_usage=True)\n self.work_mem.update_bucket_usage(bucket_id, usage)\n else:\n affinity = do_softmax(similarity, top_k=self.top_k, inplace=True)\n\n if self.chunk_size < 1:\n object_chunks = [bucket]\n else:\n object_chunks = [\n bucket[i:i + self.chunk_size] for i in range(0, len(bucket), self.chunk_size)\n ]\n\n for objects in object_chunks:\n this_sensory = self._get_sensory_by_ids(objects)\n this_last_mask = self._get_mask_by_ids(last_mask, objects)\n this_msk_value = self._get_visual_values_by_ids(objects) # (1/2)*num_objects*C*N\n visual_readout = self._readout(affinity,\n this_msk_value).view(bs, len(objects), self.CV, h, w)\n pixel_readout = network.pixel_fusion(pix_feat, visual_readout, this_sensory,\n this_last_mask)\n this_obj_mem = self._get_object_mem_by_ids(objects).unsqueeze(2)\n readout_memory, aux_features = network.readout_query(pixel_readout, this_obj_mem)\n for i, obj in enumerate(objects):\n all_readout_mem[obj] = readout_memory[:, i]\n\n if self.save_aux:\n aux_output = {\n 'sensory': this_sensory,\n 'pixel_readout': pixel_readout,\n 'q_logits': aux_features['logits'] if aux_features else None,\n 'q_weights': aux_features['q_weights'] if aux_features else None,\n 
'p_weights': aux_features['p_weights'] if aux_features else None,\n 'attn_mask': aux_features['attn_mask'].float() if aux_features else None,\n }\n self.aux = aux_output\n\n return all_readout_mem\n\n def add_memory(self,\n key: torch.Tensor,\n shrinkage: torch.Tensor,\n msk_value: torch.Tensor,\n obj_value: torch.Tensor,\n objects: List[int],\n selection: torch.Tensor = None,\n *,\n as_permanent: bool = False) -> None:\n # key: (1/2)*C*H*W\n # msk_value: (1/2)*num_objects*C*H*W\n # obj_value: (1/2)*num_objects*Q*C\n # objects contains a list of object ids corresponding to the objects in msk_value/obj_value\n bs = key.shape[0]\n assert shrinkage.shape[0] == bs\n assert msk_value.shape[0] == bs\n assert obj_value.shape[0] == bs\n\n self.engaged = True\n if self.H is None or self.config_stale:\n self.config_stale = False\n self.H, self.W = msk_value.shape[-2:]\n self.HW = self.H * self.W\n # convert from num. frames to num. tokens\n self.max_work_tokens = self.max_mem_frames * self.HW\n if self.use_long_term:\n self.min_work_tokens = self.min_mem_frames * self.HW\n\n # key: bs*C*N\n # value: bs*num_objects*C*N\n key = key.flatten(start_dim=2)\n shrinkage = shrinkage.flatten(start_dim=2)\n self.CK = key.shape[1]\n\n msk_value = msk_value.flatten(start_dim=3)\n self.CV = msk_value.shape[2]\n\n if selection is not None:\n # not used in non-long-term mode\n selection = selection.flatten(start_dim=2)\n\n # insert object values into object memory\n for obj_id, obj in enumerate(objects):\n if obj in self.obj_v:\n \"\"\"streaming average\n each self.obj_v[obj] is (1/2)*num_summaries*(embed_dim+1)\n first embed_dim keeps track of the sum of embeddings\n the last dim keeps the total count\n averaging in done inside the object transformer\n\n incoming obj_value is (1/2)*num_objects*num_summaries*(embed_dim+1)\n self.obj_v[obj] = torch.cat([self.obj_v[obj], obj_value[:, obj_id]], dim=0)\n \"\"\"\n last_acc = self.obj_v[obj][:, :, -1]\n new_acc = last_acc + obj_value[:, obj_id, :, -1]\n\n self.obj_v[obj][:, :, :-1] = (self.obj_v[obj][:, :, :-1] +\n obj_value[:, obj_id, :, :-1])\n self.obj_v[obj][:, :, -1] = new_acc\n else:\n self.obj_v[obj] = obj_value[:, obj_id]\n\n # convert mask value tensor into a dict for insertion\n msk_values = {obj: msk_value[:, obj_id] for obj_id, obj in enumerate(objects)}\n self.work_mem.add(key,\n msk_values,\n shrinkage,\n selection=selection,\n as_permanent=as_permanent)\n\n for bucket_id in self.work_mem.buckets.keys():\n # long-term memory cleanup\n if self.use_long_term:\n # Do memory compressed if needed\n if self.work_mem.non_perm_size(bucket_id) >= self.max_work_tokens:\n # Remove obsolete features if needed\n if self.long_mem.non_perm_size(bucket_id) >= (self.max_long_tokens -\n self.num_prototypes):\n self.long_mem.remove_obsolete_features(\n bucket_id,\n self.max_long_tokens - self.num_prototypes - self.buffer_tokens)\n\n self.compress_features(bucket_id)\n else:\n # FIFO\n self.work_mem.remove_old_memory(bucket_id, self.max_work_tokens)\n\n def purge_except(self, obj_keep_idx: List[int]) -> None:\n # purge certain objects from the memory except the one listed\n self.work_mem.purge_except(obj_keep_idx)\n if self.use_long_term and self.long_mem.engaged():\n self.long_mem.purge_except(obj_keep_idx)\n self.sensory = {k: v for k, v in self.sensory.items() if k in obj_keep_idx}\n\n if not self.work_mem.engaged():\n # everything is removed!\n self.engaged = False\n\n def compress_features(self, bucket_id: int) -> None:\n HW = self.HW\n\n # perform memory consolidation\n 
prototype_key, prototype_value, prototype_shrinkage = self.consolidation(\n *self.work_mem.get_all_sliced(bucket_id, 0, -self.min_work_tokens))\n\n # remove consolidated working memory\n self.work_mem.sieve_by_range(bucket_id,\n 0,\n -self.min_work_tokens,\n min_size=self.min_work_tokens)\n\n # add to long-term memory\n self.long_mem.add(prototype_key,\n prototype_value,\n prototype_shrinkage,\n selection=None,\n supposed_bucket_id=bucket_id)\n\n def consolidation(self, candidate_key: torch.Tensor, candidate_shrinkage: torch.Tensor,\n candidate_selection: torch.Tensor, candidate_value: Dict[int, torch.Tensor],\n usage: torch.Tensor) -> (torch.Tensor, Dict[int, torch.Tensor], torch.Tensor):\n # find the indices with max usage\n bs = candidate_key.shape[0]\n assert bs in [1, 2]\n\n prototype_key = []\n prototype_selection = []\n for bi in range(bs):\n _, max_usage_indices = torch.topk(usage[bi], k=self.num_prototypes, dim=-1, sorted=True)\n prototype_indices = max_usage_indices.flatten()\n prototype_key.append(candidate_key[bi, :, prototype_indices])\n prototype_selection.append(candidate_selection[bi, :, prototype_indices])\n prototype_key = torch.stack(prototype_key, dim=0)\n prototype_selection = torch.stack(prototype_selection, dim=0)\n \"\"\"\n Potentiation step\n \"\"\"\n similarity = get_similarity(candidate_key, candidate_shrinkage, prototype_key,\n prototype_selection)\n affinity = do_softmax(similarity)\n\n # readout the values\n prototype_value = {k: self._readout(affinity, v) for k, v in candidate_value.items()}\n\n # readout the shrinkage term\n prototype_shrinkage = self._readout(affinity, candidate_shrinkage)\n\n return prototype_key, prototype_value, prototype_shrinkage\n\n def initialize_sensory_if_needed(self, sample_key: torch.Tensor, ids: List[int]):\n for obj in ids:\n if obj not in self.sensory:\n # also initializes the sensory memory\n bs, _, h, w = sample_key.shape\n self.sensory[obj] = torch.zeros((bs, self.sensory_dim, h, w),\n device=sample_key.device)\n\n def update_sensory(self, sensory: torch.Tensor, ids: List[int]):\n # sensory: 1*num_objects*C*H*W\n for obj_id, obj in enumerate(ids):\n self.sensory[obj] = sensory[:, obj_id]\n\n def get_sensory(self, ids: List[int]):\n # returns (1/2)*num_objects*C*H*W\n return self._get_sensory_by_ids(ids)\n \n def clear_non_permanent_memory(self):\n self.work_mem.clear_non_permanent_memory()\n if self.use_long_term:\n self.long_mem.clear_non_permanent_memory()\n\n def clear_sensory_memory(self):\n self.sensory = {}" }, { "identifier": "ObjectManager", "path": "cutie/inference/object_manager.py", "snippet": "class ObjectManager:\n \"\"\"\n Object IDs are immutable. The same ID always represent the same object.\n Temporary IDs are the positions of each object in the tensor. 
It changes as objects get removed.\n Temporary IDs start from 1.\n \"\"\"\n def __init__(self):\n self.obj_to_tmp_id: Dict[ObjectInfo, int] = {}\n self.tmp_id_to_obj: Dict[int, ObjectInfo] = {}\n self.obj_id_to_obj: Dict[int, ObjectInfo] = {}\n\n self.all_historical_object_ids: List[int] = []\n\n def _recompute_obj_id_to_obj_mapping(self) -> None:\n self.obj_id_to_obj = {obj.id: obj for obj in self.obj_to_tmp_id}\n\n def add_new_objects(\n self, objects: Union[List[ObjectInfo], ObjectInfo,\n List[int]]) -> (List[int], List[int]):\n if not isinstance(objects, list):\n objects = [objects]\n\n corresponding_tmp_ids = []\n corresponding_obj_ids = []\n for obj in objects:\n if isinstance(obj, int):\n obj = ObjectInfo(id=obj)\n\n if obj in self.obj_to_tmp_id:\n # old object\n corresponding_tmp_ids.append(self.obj_to_tmp_id[obj])\n corresponding_obj_ids.append(obj.id)\n else:\n # new object\n new_obj = ObjectInfo(id=obj.id)\n\n # new object\n new_tmp_id = len(self.obj_to_tmp_id) + 1\n self.obj_to_tmp_id[new_obj] = new_tmp_id\n self.tmp_id_to_obj[new_tmp_id] = new_obj\n self.all_historical_object_ids.append(new_obj.id)\n corresponding_tmp_ids.append(new_tmp_id)\n corresponding_obj_ids.append(new_obj.id)\n\n self._recompute_obj_id_to_obj_mapping()\n assert corresponding_tmp_ids == sorted(corresponding_tmp_ids)\n return corresponding_tmp_ids, corresponding_obj_ids\n\n def delete_object(self, obj_ids_to_remove: Union[int, List[int]]) -> None:\n # delete an object or a list of objects\n # re-sort the tmp ids\n if isinstance(obj_ids_to_remove, int):\n obj_ids_to_remove = [obj_ids_to_remove]\n\n new_tmp_id = 1\n total_num_id = len(self.obj_to_tmp_id)\n\n local_obj_to_tmp_id = {}\n local_tmp_to_obj_id = {}\n\n for tmp_iter in range(1, total_num_id + 1):\n obj = self.tmp_id_to_obj[tmp_iter]\n if obj.id not in obj_ids_to_remove:\n local_obj_to_tmp_id[obj] = new_tmp_id\n local_tmp_to_obj_id[new_tmp_id] = obj\n new_tmp_id += 1\n\n self.obj_to_tmp_id = local_obj_to_tmp_id\n self.tmp_id_to_obj = local_tmp_to_obj_id\n self._recompute_obj_id_to_obj_mapping()\n\n def purge_inactive_objects(self,\n max_missed_detection_count: int) -> (bool, List[int], List[int]):\n # remove tmp ids of objects that are removed\n obj_id_to_be_deleted = []\n tmp_id_to_be_deleted = []\n tmp_id_to_keep = []\n obj_id_to_keep = []\n\n for obj in self.obj_to_tmp_id:\n if obj.poke_count > max_missed_detection_count:\n obj_id_to_be_deleted.append(obj.id)\n tmp_id_to_be_deleted.append(self.obj_to_tmp_id[obj])\n else:\n tmp_id_to_keep.append(self.obj_to_tmp_id[obj])\n obj_id_to_keep.append(obj.id)\n\n purge_activated = len(obj_id_to_be_deleted) > 0\n if purge_activated:\n self.delete_object(obj_id_to_be_deleted)\n return purge_activated, tmp_id_to_keep, obj_id_to_keep\n\n def tmp_to_obj_cls(self, mask) -> torch.Tensor:\n # remap tmp id cls representation to the true object id representation\n new_mask = torch.zeros_like(mask)\n for tmp_id, obj in self.tmp_id_to_obj.items():\n new_mask[mask == tmp_id] = obj.id\n return new_mask\n\n def get_tmp_to_obj_mapping(self) -> Dict[int, ObjectInfo]:\n # returns the mapping in a dict format for saving it with pickle\n return {obj.id: tmp_id for obj, tmp_id in self.tmp_id_to_obj.items()}\n\n def realize_dict(self, obj_dict, dim=1) -> torch.Tensor:\n # turns a dict indexed by obj id into a tensor, ordered by tmp IDs\n output = []\n for _, obj in self.tmp_id_to_obj.items():\n if obj.id not in obj_dict:\n raise NotImplementedError\n output.append(obj_dict[obj.id])\n output = torch.stack(output, dim=dim)\n 
return output\n\n def make_one_hot(self, cls_mask) -> torch.Tensor:\n output = []\n for _, obj in self.tmp_id_to_obj.items():\n output.append(cls_mask == obj.id)\n if len(output) == 0:\n output = torch.zeros((0, *cls_mask.shape), dtype=torch.bool, device=cls_mask.device)\n else:\n output = torch.stack(output, dim=0)\n return output\n\n @property\n def all_obj_ids(self) -> List[int]:\n return [k.id for k in self.obj_to_tmp_id]\n\n @property\n def num_obj(self) -> int:\n return len(self.obj_to_tmp_id)\n\n def has_all(self, objects: List[int]) -> bool:\n for obj in objects:\n if obj not in self.obj_to_tmp_id:\n return False\n return True\n\n def find_object_by_id(self, obj_id) -> ObjectInfo:\n return self.obj_id_to_obj[obj_id]\n\n def find_tmp_by_id(self, obj_id) -> int:\n return self.obj_to_tmp_id[self.obj_id_to_obj[obj_id]]" }, { "identifier": "ImageFeatureStore", "path": "cutie/inference/image_feature_store.py", "snippet": "class ImageFeatureStore:\n \"\"\"\n A cache for image features.\n These features might be reused at different parts of the inference pipeline.\n This class provide an interface for reusing these features.\n It is the user's responsibility to delete redundant features.\n\n Feature of a frame should be associated with a unique index -- typically the frame id.\n \"\"\"\n def __init__(self, network: CUTIE, no_warning: bool = False):\n self.network = network\n self._store = {}\n self.no_warning = no_warning\n\n def _encode_feature(self, index: int, image: torch.Tensor) -> None:\n ms_features, pix_feat = self.network.encode_image(image)\n key, shrinkage, selection = self.network.transform_key(ms_features[0])\n self._store[index] = (ms_features, pix_feat, key, shrinkage, selection)\n\n def get_features(self, index: int,\n image: torch.Tensor) -> (Iterable[torch.Tensor], torch.Tensor):\n if index not in self._store:\n self._encode_feature(index, image)\n\n return self._store[index][:2]\n\n def get_key(self, index: int,\n image: torch.Tensor) -> (torch.Tensor, torch.Tensor, torch.Tensor):\n if index not in self._store:\n self._encode_feature(index, image)\n\n return self._store[index][2:]\n\n def delete(self, index: int) -> None:\n if index in self._store:\n del self._store[index]\n\n def __len__(self):\n return len(self._store)\n\n def __del__(self):\n if len(self._store) > 0 and not self.no_warning:\n warnings.warn(f'Leaking {self._store.keys()} in the image feature store')" }, { "identifier": "CUTIE", "path": "cutie/model/cutie.py", "snippet": "class CUTIE(nn.Module):\n def __init__(self, cfg: DictConfig, *, single_object=False):\n super().__init__()\n model_cfg = cfg.model\n self.ms_dims = model_cfg.pixel_encoder.ms_dims\n self.key_dim = model_cfg.key_dim\n self.value_dim = model_cfg.value_dim\n self.sensory_dim = model_cfg.sensory_dim\n self.pixel_dim = model_cfg.pixel_dim\n self.embed_dim = model_cfg.embed_dim\n self.single_object = single_object\n\n log.info(f'Single object: {self.single_object}')\n\n self.pixel_encoder = PixelEncoder(model_cfg)\n self.pix_feat_proj = nn.Conv2d(self.ms_dims[0], self.pixel_dim, kernel_size=1)\n self.key_proj = KeyProjection(model_cfg)\n self.mask_encoder = MaskEncoder(model_cfg, single_object=single_object)\n self.mask_decoder = MaskDecoder(model_cfg)\n self.pixel_fuser = PixelFeatureFuser(model_cfg, single_object=single_object)\n self.object_transformer = QueryTransformer(model_cfg)\n self.object_summarizer = ObjectSummarizer(model_cfg)\n self.aux_computer = AuxComputer(cfg)\n\n self.register_buffer(\"pixel_mean\", 
torch.Tensor(model_cfg.pixel_mean).view(-1, 1, 1), False)\n self.register_buffer(\"pixel_std\", torch.Tensor(model_cfg.pixel_std).view(-1, 1, 1), False)\n\n def _get_others(self, masks: torch.Tensor) -> torch.Tensor:\n # for each object, return the sum of masks of all other objects\n if self.single_object:\n return None\n\n num_objects = masks.shape[1]\n if num_objects >= 1:\n others = (masks.sum(dim=1, keepdim=True) - masks).clamp(0, 1)\n else:\n others = torch.zeros_like(masks)\n return others\n\n def encode_image(self, image: torch.Tensor) -> (Iterable[torch.Tensor], torch.Tensor):\n image = (image - self.pixel_mean) / self.pixel_std\n ms_image_feat = self.pixel_encoder(image)\n return ms_image_feat, self.pix_feat_proj(ms_image_feat[0])\n\n def encode_mask(\n self,\n image: torch.Tensor,\n ms_features: List[torch.Tensor],\n sensory: torch.Tensor,\n masks: torch.Tensor,\n *,\n deep_update: bool = True,\n chunk_size: int = -1,\n need_weights: bool = False) -> (torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor):\n image = (image - self.pixel_mean) / self.pixel_std\n others = self._get_others(masks)\n mask_value, new_sensory = self.mask_encoder(image,\n ms_features,\n sensory,\n masks,\n others,\n deep_update=deep_update,\n chunk_size=chunk_size)\n object_summaries, object_logits = self.object_summarizer(masks, mask_value, need_weights)\n return mask_value, new_sensory, object_summaries, object_logits\n\n def transform_key(self,\n final_pix_feat: torch.Tensor,\n *,\n need_sk: bool = True,\n need_ek: bool = True) -> (torch.Tensor, torch.Tensor, torch.Tensor):\n key, shrinkage, selection = self.key_proj(final_pix_feat, need_s=need_sk, need_e=need_ek)\n return key, shrinkage, selection\n\n # Used in training only.\n # This step is replaced by MemoryManager in test time\n def read_memory(self, query_key: torch.Tensor, query_selection: torch.Tensor,\n memory_key: torch.Tensor, memory_shrinkage: torch.Tensor,\n msk_value: torch.Tensor, obj_memory: torch.Tensor, pix_feat: torch.Tensor,\n sensory: torch.Tensor, last_mask: torch.Tensor,\n selector: torch.Tensor) -> (torch.Tensor, Dict[str, torch.Tensor]):\n \"\"\"\n query_key : B * CK * H * W\n query_selection : B * CK * H * W\n memory_key : B * CK * T * H * W\n memory_shrinkage: B * 1 * T * H * W\n msk_value : B * num_objects * CV * T * H * W\n obj_memory : B * num_objects * T * num_summaries * C\n pixel_feature : B * C * H * W\n \"\"\"\n batch_size, num_objects = msk_value.shape[:2]\n\n # read using visual attention\n with torch.cuda.amp.autocast(enabled=False):\n affinity = get_affinity(memory_key.float(), memory_shrinkage.float(), query_key.float(),\n query_selection.float())\n\n msk_value = msk_value.flatten(start_dim=1, end_dim=2).float()\n\n # B * (num_objects*CV) * H * W\n pixel_readout = readout(affinity, msk_value)\n pixel_readout = pixel_readout.view(batch_size, num_objects, self.value_dim,\n *pixel_readout.shape[-2:])\n pixel_readout = self.pixel_fusion(pix_feat, pixel_readout, sensory, last_mask)\n\n # read from query transformer\n mem_readout, aux_features = self.readout_query(pixel_readout, obj_memory, selector=selector)\n\n aux_output = {\n 'sensory': sensory,\n 'q_logits': aux_features['logits'] if aux_features else None,\n 'attn_mask': aux_features['attn_mask'] if aux_features else None,\n }\n\n return mem_readout, aux_output\n\n def pixel_fusion(self,\n pix_feat: torch.Tensor,\n pixel: torch.Tensor,\n sensory: torch.Tensor,\n last_mask: torch.Tensor,\n *,\n chunk_size: int = -1) -> torch.Tensor:\n last_mask = 
F.interpolate(last_mask, size=sensory.shape[-2:], mode='area')\n last_others = self._get_others(last_mask)\n fused = self.pixel_fuser(pix_feat,\n pixel,\n sensory,\n last_mask,\n last_others,\n chunk_size=chunk_size)\n return fused\n\n def readout_query(self,\n pixel_readout,\n obj_memory,\n *,\n selector=None,\n need_weights=False) -> (torch.Tensor, Dict[str, torch.Tensor]):\n return self.object_transformer(pixel_readout,\n obj_memory,\n selector=selector,\n need_weights=need_weights)\n\n def segment(self,\n ms_image_feat: List[torch.Tensor],\n memory_readout: torch.Tensor,\n sensory: torch.Tensor,\n *,\n selector: bool = None,\n chunk_size: int = -1,\n update_sensory: bool = True) -> (torch.Tensor, torch.Tensor, torch.Tensor):\n \"\"\"\n multi_scale_features is from the key encoder for skip-connection\n memory_readout is from working/long-term memory\n sensory is the sensory memory\n last_mask is the mask from the last frame, supplementing sensory memory\n selector is 1 if an object exists, and 0 otherwise. We use it to filter padded objects\n during training.\n \"\"\"\n sensory, logits = self.mask_decoder(ms_image_feat,\n memory_readout,\n sensory,\n chunk_size=chunk_size,\n update_sensory=update_sensory)\n\n prob = torch.sigmoid(logits)\n if selector is not None:\n prob = prob * selector\n\n # Softmax over all objects[]\n logits = aggregate(prob, dim=1)\n logits = F.interpolate(logits, scale_factor=4, mode='bilinear', align_corners=False)\n prob = F.softmax(logits, dim=1)\n\n return sensory, logits, prob\n\n def compute_aux(self, pix_feat: torch.Tensor, aux_inputs: Dict[str, torch.Tensor],\n selector: torch.Tensor) -> Dict[str, torch.Tensor]:\n return self.aux_computer(pix_feat, aux_inputs, selector)\n\n def forward(self, *args, **kwargs):\n raise NotImplementedError\n\n def load_weights(self, src_dict, init_as_zero_if_needed=False) -> None:\n if not self.single_object:\n # Map single-object weight to multi-object weight (4->5 out channels in conv1)\n for k in list(src_dict.keys()):\n if k == 'mask_encoder.conv1.weight':\n if src_dict[k].shape[1] == 4:\n log.info(f'Converting {k} from single object to multiple objects.')\n pads = torch.zeros((64, 1, 7, 7), device=src_dict[k].device)\n if not init_as_zero_if_needed:\n nn.init.orthogonal_(pads)\n log.info(f'Randomly initialized padding for {k}.')\n else:\n log.info(f'Zero-initialized padding for {k}.')\n src_dict[k] = torch.cat([src_dict[k], pads], 1)\n elif k == 'pixel_fuser.sensory_compress.weight':\n if src_dict[k].shape[1] == self.sensory_dim + 1:\n log.info(f'Converting {k} from single object to multiple objects.')\n pads = torch.zeros((self.value_dim, 1, 1, 1), device=src_dict[k].device)\n if not init_as_zero_if_needed:\n nn.init.orthogonal_(pads)\n log.info(f'Randomly initialized padding for {k}.')\n else:\n log.info(f'Zero-initialized padding for {k}.')\n src_dict[k] = torch.cat([src_dict[k], pads], 1)\n elif self.single_object:\n \"\"\"\n If the model is multiple-object and we are training in single-object, \n we strip the last channel of conv1.\n This is not supposed to happen in standard training except when users are trying to\n finetune a trained model with single object datasets.\n \"\"\"\n if src_dict['mask_encoder.conv1.weight'].shape[1] == 5:\n log.warning(f'Converting {k} from multiple objects to single object.'\n 'This is not supposed to happen in standard training.')\n src_dict[k] = src_dict[k][:, :-1]\n\n for k in src_dict:\n if k not in self.state_dict():\n log.info(f'Key {k} found in src_dict but not in 
self.state_dict()!!!')\n for k in self.state_dict():\n if k not in src_dict:\n log.info(f'Key {k} found in self.state_dict() but not in src_dict!!!')\n\n self.load_state_dict(src_dict, strict=False)\n\n @property\n def device(self) -> torch.device:\n return self.pixel_mean.device" }, { "identifier": "pad_divide_by", "path": "cutie/utils/tensor_utils.py", "snippet": "def pad_divide_by(in_img: torch.Tensor, d: int) -> (torch.Tensor, Iterable[int]):\n h, w = in_img.shape[-2:]\n\n if h % d > 0:\n new_h = h + d - h % d\n else:\n new_h = h\n if w % d > 0:\n new_w = w + d - w % d\n else:\n new_w = w\n lh, uh = int((new_h - h) / 2), int(new_h - h) - int((new_h - h) / 2)\n lw, uw = int((new_w - w) / 2), int(new_w - w) - int((new_w - w) / 2)\n pad_array = (int(lw), int(uw), int(lh), int(uh))\n out = F.pad(in_img, pad_array)\n return out, pad_array" }, { "identifier": "unpad", "path": "cutie/utils/tensor_utils.py", "snippet": "def unpad(img: torch.Tensor, pad: Iterable[int]) -> torch.Tensor:\n if len(img.shape) == 4:\n if pad[2] + pad[3] > 0:\n img = img[:, :, pad[2]:-pad[3], :]\n if pad[0] + pad[1] > 0:\n img = img[:, :, :, pad[0]:-pad[1]]\n elif len(img.shape) == 3:\n if pad[2] + pad[3] > 0:\n img = img[:, pad[2]:-pad[3], :]\n if pad[0] + pad[1] > 0:\n img = img[:, :, pad[0]:-pad[1]]\n elif len(img.shape) == 5:\n if pad[2] + pad[3] > 0:\n img = img[:, :, :, pad[2]:-pad[3], :]\n if pad[0] + pad[1] > 0:\n img = img[:, :, :, :, pad[0]:-pad[1]]\n else:\n raise NotImplementedError\n return img" }, { "identifier": "aggregate", "path": "cutie/utils/tensor_utils.py", "snippet": "def aggregate(prob: torch.Tensor, dim: int) -> torch.Tensor:\n with torch.cuda.amp.autocast(enabled=False):\n prob = prob.float()\n new_prob = torch.cat([torch.prod(1 - prob, dim=dim, keepdim=True), prob],\n dim).clamp(1e-7, 1 - 1e-7)\n logits = torch.log((new_prob / (1 - new_prob)))\n\n return logits" } ]
from typing import List, Optional, Iterable, Dict from omegaconf import DictConfig from cutie.inference.memory_manager import MemoryManager from cutie.inference.object_manager import ObjectManager from cutie.inference.image_feature_store import ImageFeatureStore from cutie.model.cutie import CUTIE from cutie.utils.tensor_utils import pad_divide_by, unpad, aggregate import logging import numpy as np import torch import torch.nn.functional as F
11,758
force_permanent: bool = False) -> torch.Tensor: """ Take a step with a new incoming image. If there is an incoming mask with new objects, we will memorize them. If there is no incoming mask, we will segment the image using the memory. In both cases, we will update the memory and return a segmentation. image: 3*H*W mask: H*W (if idx mask) or len(objects)*H*W or None objects: list of object ids that are valid in the mask Tensor. The ids themselves do not need to be consecutive/in order, but they need to be in the same position in the list as the corresponding mask in the tensor in non-idx-mask mode. objects is ignored if the mask is None. If idx_mask is False and objects is None, we sequentially infer the object ids. idx_mask: if True, mask is expected to contain an object id at every pixel. If False, mask should have multiple channels with each channel representing one object. end: if we are at the end of the sequence, we do not need to update memory if unsure just set it to False delete_buffer: whether to delete the image feature buffer after this step force_permanent: the memory recorded this frame will be added to the permanent memory """ if objects is None and mask is not None: assert not idx_mask objects = list(range(1, mask.shape[0] + 1)) # resize input if needed -- currently only used for the GUI resize_needed = False if self.max_internal_size > 0: h, w = image.shape[-2:] min_side = min(h, w) if min_side > self.max_internal_size: resize_needed = True new_h = int(h / min_side * self.max_internal_size) new_w = int(w / min_side * self.max_internal_size) image = F.interpolate(image.unsqueeze(0), size=(new_h, new_w), mode='bilinear', align_corners=False)[0] if mask is not None: if idx_mask: mask = F.interpolate(mask.unsqueeze(0).unsqueeze(0).float(), size=(new_h, new_w), mode='nearest', align_corners=False)[0, 0].round().long() else: mask = F.interpolate(mask.unsqueeze(0), size=(new_h, new_w), mode='bilinear', align_corners=False)[0] self.curr_ti += 1 image, self.pad = pad_divide_by(image, 16) image = image.unsqueeze(0) # add the batch dimension if self.flip_aug: image = torch.cat([image, torch.flip(image, dims=[-1])], dim=0) # whether to update the working memory is_mem_frame = ((self.curr_ti - self.last_mem_ti >= self.mem_every) or (mask is not None)) and (not end) # segment when there is no input mask or when the input mask is incomplete need_segment = (mask is None) or (self.object_manager.num_obj > 0 and not self.object_manager.has_all(objects)) update_sensory = ((self.curr_ti - self.last_mem_ti) in self.stagger_ti) and (not end) # encoding the image ms_feat, pix_feat = self.image_feature_store.get_features(self.curr_ti, image) key, shrinkage, selection = self.image_feature_store.get_key(self.curr_ti, image) # segmentation from memory if needed if need_segment: pred_prob_with_bg = self._segment(key, selection, pix_feat, ms_feat, update_sensory=update_sensory) # use the input mask if provided if mask is not None: # inform the manager of the new objects, and get a list of temporary id # temporary ids -- indicates the position of objects in the tensor # (starts with 1 due to the background channel) corresponding_tmp_ids, _ = self.object_manager.add_new_objects(objects) mask, _ = pad_divide_by(mask, 16) if need_segment: # merge predicted mask with the incomplete input mask pred_prob_no_bg = pred_prob_with_bg[1:] # use the mutual exclusivity of segmentation if idx_mask: pred_prob_no_bg[:, mask > 0] = 0 else: pred_prob_no_bg[:, mask.max(0) > 0.5] = 0 new_masks = [] for mask_id, tmp_id in 
enumerate(corresponding_tmp_ids): if idx_mask: this_mask = (mask == objects[mask_id]).type_as(pred_prob_no_bg) else: this_mask = mask[tmp_id] if tmp_id > pred_prob_no_bg.shape[0]: new_masks.append(this_mask.unsqueeze(0)) else: # +1 for padding the background channel pred_prob_no_bg[tmp_id - 1] = this_mask # new_masks are always in the order of tmp_id mask = torch.cat([pred_prob_no_bg, *new_masks], dim=0) elif idx_mask: # simply convert cls to one-hot representation if len(objects) == 0: if delete_buffer: self.image_feature_store.delete(self.curr_ti) log.warn('Trying to insert an empty mask as memory!') return torch.zeros((1, key.shape[-2] * 16, key.shape[-1] * 16), device=key.device, dtype=key.dtype) mask = torch.stack( [mask == objects[mask_id] for mask_id, _ in enumerate(corresponding_tmp_ids)], dim=0)
log = logging.getLogger() class InferenceCore: def __init__(self, network: CUTIE, cfg: DictConfig, *, image_feature_store: ImageFeatureStore = None): self.network = network self.cfg = cfg self.mem_every = cfg.mem_every stagger_updates = cfg.stagger_updates self.chunk_size = cfg.chunk_size self.save_aux = cfg.save_aux self.max_internal_size = cfg.max_internal_size self.flip_aug = cfg.flip_aug self.curr_ti = -1 self.last_mem_ti = 0 # at which time indices should we update the sensory memory if stagger_updates >= self.mem_every: self.stagger_ti = set(range(1, self.mem_every + 1)) else: self.stagger_ti = set( np.round(np.linspace(1, self.mem_every, stagger_updates)).astype(int)) self.object_manager = ObjectManager() self.memory = MemoryManager(cfg=cfg, object_manager=self.object_manager) if image_feature_store is None: self.image_feature_store = ImageFeatureStore(self.network) else: self.image_feature_store = image_feature_store self.last_mask = None def clear_memory(self): self.curr_ti = -1 self.last_mem_ti = 0 self.memory = MemoryManager(cfg=self.cfg, object_manager=self.object_manager) def clear_non_permanent_memory(self): self.curr_ti = -1 self.last_mem_ti = 0 self.memory.clear_non_permanent_memory() def clear_sensory_memory(self): self.curr_ti = -1 self.last_mem_ti = 0 self.memory.clear_sensory_memory() def update_config(self, cfg): self.mem_every = cfg['mem_every'] self.memory.update_config(cfg) def _add_memory(self, image: torch.Tensor, pix_feat: torch.Tensor, prob: torch.Tensor, key: torch.Tensor, shrinkage: torch.Tensor, selection: torch.Tensor, *, is_deep_update: bool = True, force_permanent: bool = False) -> None: """ Memorize the given segmentation in all memory stores. The batch dimension is 1 if flip augmentation is not used. image: RGB image, (1/2)*3*H*W pix_feat: from the key encoder, (1/2)*_*H*W prob: (1/2)*num_objects*H*W, in [0, 1] key/shrinkage/selection: for anisotropic l2, (1/2)*_*H*W selection can be None if not using long-term memory is_deep_update: whether to use deep update (e.g. with the mask encoder) force_permanent: whether to force the memory to be permanent """ if prob.shape[1] == 0: # nothing to add log.warn('Trying to add an empty object mask to memory!') return if force_permanent: as_permanent = 'all' else: as_permanent = 'first' self.memory.initialize_sensory_if_needed(key, self.object_manager.all_obj_ids) msk_value, sensory, obj_value, self.obj_logits = self.network.encode_mask( image, pix_feat, self.memory.get_sensory(self.object_manager.all_obj_ids), prob, deep_update=is_deep_update, chunk_size=self.chunk_size, need_weights=self.save_aux) self.memory.add_memory(key, shrinkage, msk_value, obj_value, self.object_manager.all_obj_ids, selection=selection, as_permanent=as_permanent) self.last_mem_ti = self.curr_ti if is_deep_update: self.memory.update_sensory(sensory, self.object_manager.all_obj_ids) def _segment(self, key: torch.Tensor, selection: torch.Tensor, pix_feat: torch.Tensor, ms_features: Iterable[torch.Tensor], update_sensory: bool = True) -> torch.Tensor: """ Produce a segmentation using the given features and the memory The batch dimension is 1 if flip augmentation is not used. 
key/selection: for anisotropic l2: (1/2) * _ * H * W pix_feat: from the key encoder, (1/2) * _ * H * W ms_features: an iterable of multiscale features from the encoder, each is (1/2)*_*H*W with strides 16, 8, and 4 respectively update_sensory: whether to update the sensory memory Returns: (num_objects+1)*H*W normalized probability; the first channel is the background """ bs = key.shape[0] if self.flip_aug: assert bs == 2 else: assert bs == 1 if not self.memory.engaged: log.warn('Trying to segment without any memory!') return torch.zeros((1, key.shape[-2] * 16, key.shape[-1] * 16), device=key.device, dtype=key.dtype) memory_readout = self.memory.read(pix_feat, key, selection, self.last_mask, self.network) memory_readout = self.object_manager.realize_dict(memory_readout) sensory, _, pred_prob_with_bg = self.network.segment(ms_features, memory_readout, self.memory.get_sensory( self.object_manager.all_obj_ids), chunk_size=self.chunk_size, update_sensory=update_sensory) # remove batch dim if self.flip_aug: # average predictions of the non-flipped and flipped version pred_prob_with_bg = (pred_prob_with_bg[0] + torch.flip(pred_prob_with_bg[1], dims=[-1])) / 2 else: pred_prob_with_bg = pred_prob_with_bg[0] if update_sensory: self.memory.update_sensory(sensory, self.object_manager.all_obj_ids) return pred_prob_with_bg def step(self, image: torch.Tensor, mask: Optional[torch.Tensor] = None, objects: Optional[List[int]] = None, *, idx_mask: bool = True, end: bool = False, delete_buffer: bool = True, force_permanent: bool = False) -> torch.Tensor: """ Take a step with a new incoming image. If there is an incoming mask with new objects, we will memorize them. If there is no incoming mask, we will segment the image using the memory. In both cases, we will update the memory and return a segmentation. image: 3*H*W mask: H*W (if idx mask) or len(objects)*H*W or None objects: list of object ids that are valid in the mask Tensor. The ids themselves do not need to be consecutive/in order, but they need to be in the same position in the list as the corresponding mask in the tensor in non-idx-mask mode. objects is ignored if the mask is None. If idx_mask is False and objects is None, we sequentially infer the object ids. idx_mask: if True, mask is expected to contain an object id at every pixel. If False, mask should have multiple channels with each channel representing one object. 
end: if we are at the end of the sequence, we do not need to update memory if unsure just set it to False delete_buffer: whether to delete the image feature buffer after this step force_permanent: the memory recorded this frame will be added to the permanent memory """ if objects is None and mask is not None: assert not idx_mask objects = list(range(1, mask.shape[0] + 1)) # resize input if needed -- currently only used for the GUI resize_needed = False if self.max_internal_size > 0: h, w = image.shape[-2:] min_side = min(h, w) if min_side > self.max_internal_size: resize_needed = True new_h = int(h / min_side * self.max_internal_size) new_w = int(w / min_side * self.max_internal_size) image = F.interpolate(image.unsqueeze(0), size=(new_h, new_w), mode='bilinear', align_corners=False)[0] if mask is not None: if idx_mask: mask = F.interpolate(mask.unsqueeze(0).unsqueeze(0).float(), size=(new_h, new_w), mode='nearest', align_corners=False)[0, 0].round().long() else: mask = F.interpolate(mask.unsqueeze(0), size=(new_h, new_w), mode='bilinear', align_corners=False)[0] self.curr_ti += 1 image, self.pad = pad_divide_by(image, 16) image = image.unsqueeze(0) # add the batch dimension if self.flip_aug: image = torch.cat([image, torch.flip(image, dims=[-1])], dim=0) # whether to update the working memory is_mem_frame = ((self.curr_ti - self.last_mem_ti >= self.mem_every) or (mask is not None)) and (not end) # segment when there is no input mask or when the input mask is incomplete need_segment = (mask is None) or (self.object_manager.num_obj > 0 and not self.object_manager.has_all(objects)) update_sensory = ((self.curr_ti - self.last_mem_ti) in self.stagger_ti) and (not end) # encoding the image ms_feat, pix_feat = self.image_feature_store.get_features(self.curr_ti, image) key, shrinkage, selection = self.image_feature_store.get_key(self.curr_ti, image) # segmentation from memory if needed if need_segment: pred_prob_with_bg = self._segment(key, selection, pix_feat, ms_feat, update_sensory=update_sensory) # use the input mask if provided if mask is not None: # inform the manager of the new objects, and get a list of temporary id # temporary ids -- indicates the position of objects in the tensor # (starts with 1 due to the background channel) corresponding_tmp_ids, _ = self.object_manager.add_new_objects(objects) mask, _ = pad_divide_by(mask, 16) if need_segment: # merge predicted mask with the incomplete input mask pred_prob_no_bg = pred_prob_with_bg[1:] # use the mutual exclusivity of segmentation if idx_mask: pred_prob_no_bg[:, mask > 0] = 0 else: pred_prob_no_bg[:, mask.max(0) > 0.5] = 0 new_masks = [] for mask_id, tmp_id in enumerate(corresponding_tmp_ids): if idx_mask: this_mask = (mask == objects[mask_id]).type_as(pred_prob_no_bg) else: this_mask = mask[tmp_id] if tmp_id > pred_prob_no_bg.shape[0]: new_masks.append(this_mask.unsqueeze(0)) else: # +1 for padding the background channel pred_prob_no_bg[tmp_id - 1] = this_mask # new_masks are always in the order of tmp_id mask = torch.cat([pred_prob_no_bg, *new_masks], dim=0) elif idx_mask: # simply convert cls to one-hot representation if len(objects) == 0: if delete_buffer: self.image_feature_store.delete(self.curr_ti) log.warn('Trying to insert an empty mask as memory!') return torch.zeros((1, key.shape[-2] * 16, key.shape[-1] * 16), device=key.device, dtype=key.dtype) mask = torch.stack( [mask == objects[mask_id] for mask_id, _ in enumerate(corresponding_tmp_ids)], dim=0)
pred_prob_with_bg = aggregate(mask, dim=0)
6
2023-10-19 17:49:24+00:00
16k
stanford-oval/WikiChat
chat_interactive.py
[ { "identifier": "DialogueTurn", "path": "pipelines/dialog_turn.py", "snippet": "class DialogueTurn:\n def __init__(\n self,\n agent_utterance: str = None,\n user_utterance: str = None,\n pipeline: str = None,\n engine: str = None,\n generate_engine: str = None,\n draft_engine: str = None,\n ):\n self.engine = engine\n self.generate_engine = generate_engine\n self.draft_engine = draft_engine\n self.pipeline = pipeline\n self.wall_time_seconds = (\n 0 # how much time it took to generate this turn, in seconds\n )\n self.agent_utterance = agent_utterance\n self.user_utterance = user_utterance\n\n # retrieve_and_generate pipeline\n self.initial_search_query = None\n self.initial_search_query_time = None\n self.initial_search_results = []\n self.initial_search_result_titles = []\n self.initial_search_bullets = []\n\n # generate_and_correct pipeline\n self.llm_utterance = None\n self.claims = []\n self.verification_retrieval_results = {}\n self.verification_result = {}\n\n # early_combine pipeline\n self.combined_evidences = []\n self.combined_utterance = None\n self.feedback = []\n self.feedback_scores = []\n self.refined_utterance = None\n\n def _summarize_vc_log(self):\n verification_summary = {}\n assert len(self.verification_result) == len(\n self.verification_retrieval_results\n ), \"We need to have retrieved evidence for all claims\"\n for key, value in self.verification_retrieval_results.items():\n claim_idx = int(key)\n v_ret_results = []\n for v in value:\n title, paragraph, score = tuple(v)\n v_ret_results.append(\n {\"title\": title, \"paragraph\": paragraph, \"score\": round(score, 1)}\n )\n verification_summary[self.claims[claim_idx][0]] = OrderedDict(\n {\n \"label\": self.verification_result[claim_idx][\"label\"],\n \"fixed_claim\": self.verification_result[claim_idx][\"fixed_claim\"],\n \"retrieval_results\": v_ret_results,\n }\n )\n return verification_summary\n\n def _summarize_rg_log(self):\n rg_summary = {\n \"initial_search_query\": self.initial_search_query,\n \"initial_search_query_time\": self.initial_search_query_time,\n \"initial_search_bullets\": self.initial_search_bullets,\n \"initial_search_results\": [],\n }\n\n for i in range(len(self.initial_search_results)):\n rg_summary[\"initial_search_results\"].append(\n {\n \"title\": self.initial_search_result_titles[i],\n \"paragraph\": self.initial_search_results[i],\n # 'bullets': self.initial_search_bullets,\n }\n )\n\n return rg_summary\n\n def log(self):\n \"\"\"\n Returns a json object that contains all information inside `self`\n \"\"\"\n # combine fields into a more human-readable field\n verification_summary = self._summarize_vc_log()\n rg_summary = self._summarize_rg_log()\n\n return OrderedDict(\n {\n # retrieve_and_generate pipeline\n \"retrieve_and_generate\": rg_summary,\n # generate_and_correct pipeline\n \"llm_utterance\": self.llm_utterance,\n \"generate_and_correct\": verification_summary,\n # early_combine pipeline\n \"combined_evidences\": self.combined_evidences,\n \"combined_utterance\": self.combined_utterance,\n \"feedback\": self.feedback,\n \"feedback_scores\": self.feedback_scores,\n \"refined_utterance\": self.refined_utterance,\n \"user_utterance\": self.user_utterance,\n \"agent_utterance\": self.agent_utterance,\n \"engine\": self.engine,\n \"generate_engine\": self.generate_engine,\n \"draft_engine\": self.draft_engine,\n \"pipeline\": self.pipeline,\n \"wall_time_seconds\": round(self.wall_time_seconds, 1),\n }\n )\n\n @staticmethod\n def utterance_list_to_dialog_history(utterance_list: 
List[str]):\n \"\"\"\n The resulting dialog history will not have all the fields correctly initialized, since no information about e.g. search queries is available\n \"\"\"\n dialog_history = []\n assert (\n len(utterance_list) % 2 == 1\n ), \"The first turn is always the user, and the turn to be generated is always the agent, so the number of turns should be odd\"\n for i in range(0, len(utterance_list) - 2, 2):\n dialog_history.append(\n DialogueTurn(\n user_utterance=utterance_list[i],\n agent_utterance=utterance_list[i + 1],\n )\n )\n user_utterance = utterance_list[-1]\n\n return dialog_history, user_utterance\n\n @staticmethod\n def dialog_history_to_utterance_list(dialog_history) -> List[str]:\n \"\"\"\n Convert a list of DialogueTurns to a list of strings\n \"\"\"\n utterance_list = []\n for turn in dialog_history:\n utterance_list.append(turn.user_utterance)\n utterance_list.append(turn.agent_utterance)\n return utterance_list" }, { "identifier": "Chatbot", "path": "pipelines/chatbot.py", "snippet": "class Chatbot:\n \"\"\"\n A stateless chatbot. Stateless means that it does not store the history of the dialog in itself, but requires it as an input\n \"\"\"\n\n def __init__(self, args) -> None:\n # Initialize everything, because we can change the pipeline on the fly using system_parameters\n self.claim_splitter = ClaimSplitter(args.claim_prompt_template_file)\n self.evi_num = args.evi_num\n self.colbert_endpoint = args.colbert_endpoint\n self.retrieval_num = args.retrieval_num\n self.refiner = Refiner(prompt=args.refinement_prompt, args=args)\n\n self.temperature = args.temperature\n self.max_tokens = args.max_tokens\n self.top_p = args.top_p\n self.presence_penalty = args.presence_penalty\n self.frequency_penalty = args.frequency_penalty\n self.skip_verification = args.skip_verification\n\n # default parameters, can be overridden:\n self.engine = args.engine\n self.generate_engine = args.generate_engine\n self.draft_engine = args.draft_engine\n self.do_refine=args.do_refine\n self.fuse_claim_splitting = args.fuse_claim_splitting\n\n def generate_next_turn(\n self,\n object_dlg_history: List[DialogueTurn],\n new_user_utterance: str,\n pipeline: str,\n system_parameters: dict = {},\n ):\n \"\"\"\n Generate the next turn of the dialog\n system_parameters: can override some of the default parameters defined in __init__()\n \"\"\"\n # throw error if system_parameters contains keys that are not supported\n for key in system_parameters:\n assert key in [\n \"engine\",\n \"generate_engine\",\n \"draft_engine\",\n \"fuse_claim_splitting\",\n \"do_refine\",\n ], f\"Unsupported system_parameter key: {key}\"\n\n engine = system_parameters.get(\"engine\", self.engine)\n generate_engine = system_parameters.get(\"generate_engine\", self.generate_engine)\n if generate_engine is None:\n # this means that the default `generate_engine` was not provided via commandline, and system_parameters is not override it either.\n # So default to `engine`\n generate_engine = engine\n draft_engine = system_parameters.get(\"draft_engine\", self.draft_engine)\n if draft_engine is None:\n draft_engine = engine\n fuse_claim_splitting = system_parameters.get(\"fuse_claim_splitting\", self.fuse_claim_splitting)\n engine_dict = {\"default\": engine, \"generate\": generate_engine, \"draft\": draft_engine}\n do_refine = system_parameters.get(\"do_refine\", self.do_refine)\n\n start_time = time.time()\n\n if pipeline == \"generate_and_correct\":\n new_dlg_turn = self.generate_and_correct_pipeline(\n object_dlg_history,\n 
new_user_utterance=new_user_utterance,\n engine_dict=engine_dict,\n )\n elif pipeline == \"retrieve_and_generate\":\n new_dlg_turn = self.retrieve_and_generate_pipeline(\n object_dlg_history,\n new_user_utterance=new_user_utterance,\n engine_dict=engine_dict,\n )\n elif pipeline == \"generate\":\n reply = self._generate_only(\n \"baseline_chatbot.prompt\",\n object_dlg_history,\n new_user_utterance=new_user_utterance,\n engine_dict=engine_dict,\n )\n new_dlg_turn = DialogueTurn(user_utterance=new_user_utterance)\n new_dlg_turn.llm_utterance = reply\n new_dlg_turn.agent_utterance = reply\n elif pipeline == \"retrieve_only\":\n new_dlg_turn = self.retrieve_only_pipeline(\n object_dlg_history,\n new_user_utterance=new_user_utterance,\n engine_dict=engine_dict,\n )\n elif pipeline == \"early_combine\":\n new_dlg_turn = self.early_combine_pipeline(\n object_dlg_history,\n new_user_utterance=new_user_utterance,\n engine_dict=engine_dict,\n fuse_claim_splitting=fuse_claim_splitting\n )\n else:\n raise ValueError\n\n if do_refine == \"True\" or do_refine == \"true\" or do_refine == True:\n do_refine = True\n else:\n do_refine = False\n\n if do_refine:\n prerefinement_agent_utterance = new_dlg_turn.agent_utterance\n new_dlg_turn.agent_utterance = self.refiner.set_refinement_fields(\n object_dlg_history, new_dlg_turn, engine_dict=engine_dict\n )\n if new_dlg_turn.agent_utterance == prerefinement_agent_utterance:\n logger.info(\"Refinement did NOT change the agent utterance\")\n\n new_dlg_turn.engine = engine\n new_dlg_turn.generate_engine = generate_engine\n new_dlg_turn.draft_engine = draft_engine\n new_dlg_turn.pipeline = pipeline\n\n end_time = time.time()\n new_dlg_turn.wall_time_seconds = end_time - start_time\n\n return new_dlg_turn\n\n def retrieve_only_pipeline(\n self,\n object_dlg_history: List[DialogueTurn],\n new_user_utterance: str,\n engine_dict: dict,\n ):\n new_dlg_turn = DialogueTurn(user_utterance=new_user_utterance)\n # search based on the history of the dialog so far\n search_prompt_output = llm_generate(\n template_file=\"query.prompt\",\n prompt_parameter_values={\n \"dlg\": object_dlg_history,\n \"new_user_utterance\": new_user_utterance,\n \"force_search\": True,\n },\n engine=engine_dict[\"default\"],\n max_tokens=50,\n temperature=0.0,\n top_p=0.5,\n stop_tokens=[\"\\n\"],\n postprocess=False,\n )\n search_prompt_output = (\n \"Yes. 
\" + search_prompt_output\n ) # because we are forcing a search\n self._handle_search_prompt_output(\n search_prompt_output=search_prompt_output,\n new_dlg_turn=new_dlg_turn,\n num_paragraphs=1,\n summarize_results=False,\n engine_dict=engine_dict,\n )\n\n paragraph = new_dlg_turn.initial_search_results[\n 0\n ] # we only retrieve one paragraph\n title = new_dlg_turn.initial_search_result_titles[0]\n new_dlg_turn.agent_utterance = (\n 'I found an article titled \"' + title + '\": ' + paragraph\n )\n return new_dlg_turn\n\n def retrieve_and_generate_pipeline(\n self,\n object_dlg_history: List[DialogueTurn],\n new_user_utterance: str,\n engine_dict: dict,\n ):\n new_dlg_turn = DialogueTurn(user_utterance=new_user_utterance)\n reply = self._retrieve_and_generate(\n object_dlg_history,\n new_user_utterance,\n new_dlg_turn,\n engine_dict=engine_dict,\n )\n new_dlg_turn.agent_utterance = reply\n\n return new_dlg_turn\n\n def generate_and_correct_pipeline(\n self,\n object_dlg_history: List[DialogueTurn],\n new_user_utterance: str,\n engine_dict: str,\n ):\n \"\"\"\n Verify and correct the last turn of a given dialog using retrieved evidences\n Args:\n - `object_dlg_history` (list): previous dialog turns\n - `new_user_utterance` (str): last user utterance\n Returns:\n - `corrected_reply` (str): corrected LLM response\n - `new_dialog_turn` (DialogTurn)\n \"\"\"\n original_reply = self._generate_only(\n \"generate.prompt\",\n object_dlg_history,\n new_user_utterance,\n engine_dict=engine_dict,\n )\n\n new_dlg_turn = DialogueTurn(user_utterance=new_user_utterance)\n new_dlg_turn.llm_utterance = original_reply\n\n new_dlg_turn.agent_utterance = self._generate_and_correct_reply(\n object_dlg_history,\n new_user_utterance,\n original_reply,\n new_dlg_turn,\n engine_dict=engine_dict,\n )\n\n return new_dlg_turn\n\n def early_combine_pipeline(\n self,\n object_dlg_history: List[DialogueTurn],\n new_user_utterance: str,\n engine_dict: str,\n fuse_claim_splitting: bool\n ):\n new_dlg_turn = DialogueTurn(user_utterance=new_user_utterance)\n\n # gather evidence from two routs in parallel\n with ThreadPoolExecutor(2) as executor:\n search_summary = executor.submit(\n self._search_and_summarize,\n object_dlg_history,\n new_user_utterance,\n new_dlg_turn,\n engine_dict=engine_dict,\n )\n supported_claims = executor.submit(\n self._generate_split_and_fact_check,\n object_dlg_history,\n new_user_utterance,\n new_dlg_turn,\n engine_dict=engine_dict,\n fuse_claim_splitting=fuse_claim_splitting\n )\n search_summary = search_summary.result()\n supported_claims = supported_claims.result()\n\n combined_evi = search_summary + supported_claims\n # logger.info('Combined evidences: %s', new_dlg_turn.combined_evidences)\n new_dlg_turn.combined_evidences = combined_evi\n\n if not combined_evi:\n logger.info(\"Combined evidence is empty\")\n # if new_dlg_turn.initial_search_query is None:\n # new_dlg_turn.combined_utterance = original_reply # no search needed, so return the original chitchat response\n # else:\n # new_dlg_turn.combined_utterance = \"Sorry, I'm not sure.\" # will become more conversational after refinement\n # else:\n new_dlg_turn.combined_utterance = self._reply_using_combined_evidence(\n object_dlg_history,\n new_user_utterance,\n combined_evi,\n engine_dict=engine_dict,\n )\n new_dlg_turn.agent_utterance = new_dlg_turn.combined_utterance\n\n return new_dlg_turn\n\n def _handle_search_prompt_output(\n self,\n search_prompt_output: str,\n new_dlg_turn: DialogueTurn,\n num_paragraphs,\n summarize_results: 
bool,\n engine_dict: dict,\n ):\n \"\"\"\n Updates `new_dlg_turn` with logs\n A sample output is: Yes. You Google \"James E. Webb the administrator of NASA\". The year of the results is \"none\".]\n \"\"\"\n reranking_factor = 3 # we will retrieve num_paragraphs * reranking_factor paragraphs before reranking them\n\n search_prompt_output = search_prompt_output.strip()\n search_pattern = (\n r'Yes\\. You.*\"([^\"]*)\".* The year of the results is \"([^=]*)\"\\.]?'\n )\n search_match = re.match(search_pattern, search_prompt_output)\n\n if search_prompt_output.startswith(\"No\"):\n # sometimes LLM outputs No. with extra explanation afterwards instead of ']', or \"No search needed\". So this more lax condition leads to fewer Exceptions\n logger.info(\"No search needed.\")\n elif search_match:\n search_query = search_match.group(1)\n search_query_time = search_match.group(2)\n y = extract_year(title=\"\", passage=search_query)\n if len(y) > 0:\n logger.info(\"Overriding query year\")\n search_query_time = y[0]\n logger.info(\"search_query = %s\", search_query)\n logger.info(\"search_query_time = %s\", search_query_time)\n\n # retrieve more paragraphs so that we can do date-based reranking (if needed) and skip \"None\" summaries (if any)\n paragraphs, scores, titles = self._colbert_retrieve(\n query=search_query,\n num_paragraphs=num_paragraphs * reranking_factor,\n rerank=search_query_time,\n )\n\n logger.info(\"Colbert titles: %s\", str(titles))\n\n if summarize_results:\n bullets = []\n not_none_paragraphs = []\n not_none_titles = []\n # summarize in batches, until we reach `num_paragraphs` paragraphs that are deemed relevant\n for start_idx in range(\n 0, num_paragraphs * reranking_factor, num_paragraphs\n ):\n b, not_none_paragraph_indices = self._summarize_results(\n search_query,\n paragraphs[start_idx : start_idx + num_paragraphs],\n titles[start_idx : start_idx + num_paragraphs],\n maximum_paragraphs_needed=num_paragraphs\n - len(not_none_paragraphs),\n engine_dict=engine_dict,\n )\n # print(\"not_none_paragraph_indices = \", not_none_paragraph_indices)\n not_none_paragraphs += [\n paragraphs[start_idx + i] for i in not_none_paragraph_indices\n ]\n not_none_titles += [\n titles[start_idx + i] for i in not_none_paragraph_indices\n ]\n bullets = bullets + b\n assert len(not_none_paragraphs) <= num_paragraphs\n if len(not_none_paragraphs) == num_paragraphs:\n break\n titles = not_none_titles\n paragraphs = not_none_paragraphs\n\n else:\n paragraphs = paragraphs[:num_paragraphs]\n titles = titles[:num_paragraphs]\n bullets = None\n\n # log everything\n new_dlg_turn.initial_search_query = search_query\n new_dlg_turn.initial_search_query_time = search_query_time\n new_dlg_turn.initial_search_results = paragraphs\n new_dlg_turn.initial_search_result_titles = titles\n new_dlg_turn.initial_search_bullets = bullets\n else:\n raise ValueError(\n \"Search prompt's output is invalid: %s\" % search_prompt_output\n )\n # logger.error('Search prompt\\'s output is invalid: %s' % search_prompt_output)\n\n def _summarize_results(\n self,\n search_query,\n paragraphs,\n titles,\n maximum_paragraphs_needed,\n engine_dict,\n ):\n \"\"\"\n Summarizes `paragraphs` and returns the indices of at most `maximum_paragraphs_needed` paragraphs that are deemed relevant to the `query`\n \"\"\"\n summaries = llm_generate(\n template_file=\"summarize_and_filter.prompt\",\n prompt_parameter_values=[\n {\"title\": t, \"article\": p, \"query\": search_query}\n for (t, p) in zip(titles, paragraphs)\n ],\n 
engine=engine_dict[\"default\"],\n max_tokens=200,\n temperature=0.0,\n top_p=0.5,\n stop_tokens=None,\n postprocess=False,\n )\n bullets = []\n not_none_paragraph_indices = []\n for paragraph_idx, s in enumerate(summaries):\n if s.startswith(\"Yes. \"):\n # necessary for distilled models\n s = s[5:]\n if s.startswith(\"None\") or s == \"- None\" or s == \"-None\":\n # skip the None paragraphs\n logger.info(\n \"This retrieved paragraphs was deemed unrelated: %s\",\n paragraphs[paragraph_idx],\n )\n continue\n not_none_paragraph_indices.append(paragraph_idx)\n for b in s.split(\"\\n-\"):\n b = b.strip()\n if len(b) == 0:\n continue\n if not b.endswith(\".\"):\n # most likely a partial generation that was cut off because of max_tokens\n continue\n bullets.append(b.strip(\"- \"))\n if len(not_none_paragraph_indices) == maximum_paragraphs_needed:\n break\n\n return bullets, not_none_paragraph_indices\n\n def _retrieve_and_generate(\n self,\n object_dlg_history: List[DialogueTurn],\n new_user_utterance: str,\n new_dlg_turn: DialogueTurn,\n engine_dict: dict,\n ) -> str:\n \"\"\"\n Retrieves related documents and generates a reply base on them, given the dialog history\n Updates `new_dlg_turn` with logs\n Returns reply\n \"\"\"\n self._search_and_summarize(\n object_dlg_history, new_user_utterance, new_dlg_turn, engine_dict\n )\n\n reply = llm_generate(\n template_file=\"draft.prompt\",\n prompt_parameter_values={\n \"dlg\": object_dlg_history,\n \"last_user_utterance\": new_user_utterance,\n \"evidences\": new_dlg_turn.initial_search_bullets,\n },\n engine=engine_dict[\"default\"],\n max_tokens=self.max_tokens,\n temperature=self.temperature,\n top_p=self.top_p,\n presence_penalty=self.presence_penalty,\n stop_tokens=[\"\\n\"],\n postprocess=True,\n )\n return reply\n\n def _search_and_summarize(\n self,\n object_dlg_history: List[DialogueTurn],\n new_user_utterance: str,\n new_dlg_turn: DialogueTurn,\n engine_dict: dict,\n ):\n search_prompt_output = llm_generate(\n template_file=\"query.prompt\",\n prompt_parameter_values={\n \"dlg\": object_dlg_history,\n \"new_user_utterance\": new_user_utterance,\n \"force_search\": False,\n },\n engine=engine_dict[\"default\"],\n max_tokens=50,\n temperature=0.0,\n top_p=0.5,\n stop_tokens=[\"\\n\"],\n postprocess=False,\n )\n self._handle_search_prompt_output(\n search_prompt_output=search_prompt_output,\n new_dlg_turn=new_dlg_turn,\n num_paragraphs=self.retrieval_num,\n summarize_results=True,\n engine_dict=engine_dict,\n )\n return new_dlg_turn.initial_search_bullets\n\n def _generate_split_and_fact_check(\n self,\n object_dlg_history: List[DialogueTurn],\n new_user_utterance: str,\n new_dlg_turn: DialogueTurn,\n engine_dict: dict,\n fuse_claim_splitting: bool\n ):\n original_reply = self._generate_only(\n \"generate.prompt\",\n object_dlg_history,\n new_user_utterance,\n engine_dict=engine_dict,\n )\n if not fuse_claim_splitting:\n new_dlg_turn.llm_utterance = original_reply\n claims_output = None\n else:\n new_dlg_turn.llm_utterance = None\n claims_output = original_reply\n\n\n claims = self.claim_splitter.split_claim(\n dialog_history=object_dlg_history,\n new_user_utterance=new_user_utterance,\n current_agent_utterance=original_reply,\n engine_dict=engine_dict,\n claims_output=claims_output\n )\n\n new_dlg_turn.claims = claims\n if not claims:\n logger.info(\"No claims to check\")\n return []\n\n # retrieve evidence\n ret_output = self._retrieve_evidences(claims)\n\n # verify claims\n ver_output = self._verify_claims(\n claims,\n ret_output,\n 
object_dlg_history,\n new_user_utterance,\n original_reply,\n do_correct=False,\n engine_dict=engine_dict,\n )\n\n new_dlg_turn.verification_retrieval_results = ret_output\n new_dlg_turn.verification_result = ver_output\n\n # only keep supported claim\n supported_claims = []\n for label_fix in ver_output:\n verification_label, fixed_claim = (\n label_fix[\"label\"],\n label_fix[\"fixed_claim\"],\n )\n if verification_label == \"SUPPORTS\":\n supported_claims.append(fixed_claim)\n return supported_claims\n\n def _generate_and_correct_reply(\n self,\n object_dlg_history: List[DialogueTurn],\n new_user_utterance: str,\n original_reply: str,\n new_dlg_turn: DialogueTurn,\n engine_dict: dict,\n ) -> str:\n \"\"\"\n Verifies and corrects `original_reply` given the dialog history\n Updates `new_dlg_turn` with logs\n Returns corrected reply\n \"\"\"\n # split claims\n # the returned \"claims\" is a list of tuples (claim, year)\n claims = self.claim_splitter.split_claim(\n dialog_history=object_dlg_history,\n new_user_utterance=new_user_utterance,\n current_agent_utterance=original_reply,\n engine_dict=engine_dict,\n )\n claims = ClaimSplitter.remove_claims_from_previous_turns(claims, object_dlg_history)\n if not claims:\n logger.info(\"No claims to check\")\n return original_reply\n new_dlg_turn.claims = claims\n\n # retrieve evidence\n ret_output = self._retrieve_evidences(claims)\n\n # TODO: use the ret_output together with initial search outputs for verification\n # verify claims\n ver_output = self._verify_claims(\n claims,\n ret_output,\n object_dlg_history,\n new_user_utterance,\n original_reply,\n do_correct=True,\n engine_dict=engine_dict,\n )\n\n # update dialog turn\n new_dlg_turn.verification_retrieval_results = ret_output\n new_dlg_turn.verification_result = ver_output\n if is_everything_verified(ver_output):\n logger.info(\"All claims passed verification, nothing to correct\")\n return original_reply\n\n # correction\n corrected_reply = original_reply\n fixed_claims = []\n for label_fix in ver_output:\n verification_label, fixed_claim = (\n label_fix[\"label\"],\n label_fix[\"fixed_claim\"],\n )\n if (\n verification_label == \"SUPPORTS\"\n ): # if the claim is already correct, no need to fix\n continue\n fixed_claims.append(fixed_claim)\n assert len(fixed_claims) > 0\n corrected_reply = self._correct(\n original_reply,\n object_dlg_history,\n new_user_utterance,\n fixed_claims, # corrected claim for REFUTE and \"I'm not sure\" for NOT ENOUGH INFO claims.\n engine_dict=engine_dict,\n )\n\n return corrected_reply\n\n def _generate_only(\n self,\n generation_prompt: str,\n dialog_history: List[DialogueTurn],\n new_user_utterance: str,\n engine_dict: dict,\n ) -> str:\n \"\"\"\n Generate baseline LLM response\n Args:\n - `generation_prompt` (str): the .prompt file to use for this stage\n - `dialog_history` (list): previous turns\n Returns:\n - `reply`(str): original LLM response\n \"\"\"\n reply = llm_generate(\n template_file=generation_prompt,\n prompt_parameter_values={\n \"dlg\": dialog_history,\n \"new_user_utterance\": new_user_utterance,\n \"engine_name\": engine_dict[\"generate\"] # used to enforce model knowledge cut-off date for models other than GPT-4\n },\n engine=engine_dict[\"generate\"],\n max_tokens=self.max_tokens,\n temperature=self.temperature,\n stop_tokens=[\"\\n\"],\n top_p=self.top_p,\n frequency_penalty=self.frequency_penalty,\n presence_penalty=self.presence_penalty,\n postprocess=True,\n )\n\n return reply\n\n def _correct(\n self,\n original_reply,\n 
object_dlg_history,\n last_user_utterance,\n fixed_claims,\n engine_dict: dict,\n ):\n \"\"\"\n Given context + original response + evidence for a claim, fix the original response\n\n Args:\n - `original_reply`(str): LLM's original response\n - `object_dlg_history`(list): list of previous DialogueTurns\n - `last_user_utterance` (str): last user utterance\n - `fixed_claims` (list): list of fixed claims\n Returns:\n - `corrected_reply`(str): corrected LLM response\n \"\"\"\n # correction prompt's context should be in one line\n correction_reply = llm_generate(\n template_file=\"correction_combiner.prompt\",\n prompt_parameter_values={\n \"dlg\": object_dlg_history,\n \"last_user_utterance\": last_user_utterance,\n \"original_reply\": original_reply,\n \"fixed_claims\": fixed_claims,\n },\n engine=engine_dict[\"default\"],\n max_tokens=self.max_tokens,\n temperature=0,\n stop_tokens=[\"\\n\"],\n top_p=self.top_p,\n frequency_penalty=self.frequency_penalty,\n presence_penalty=self.presence_penalty,\n postprocess=True,\n )\n\n return correction_reply\n\n def _reply_using_combined_evidence(\n self,\n object_dlg_history,\n last_user_utterance,\n evidences,\n engine_dict: dict,\n ):\n combined_reply = llm_generate(\n template_file=\"draft.prompt\",\n prompt_parameter_values={\n \"dlg\": object_dlg_history,\n \"last_user_utterance\": last_user_utterance,\n \"evidences\": evidences,\n },\n engine=engine_dict[\"draft\"],\n max_tokens=self.max_tokens,\n temperature=0,\n stop_tokens=None,\n top_p=self.top_p,\n frequency_penalty=self.frequency_penalty,\n presence_penalty=self.presence_penalty,\n postprocess=True,\n )\n\n return combined_reply\n\n def _colbert_retrieve(\n self,\n query: str,\n num_paragraphs: int,\n rerank=\"none\",\n top_p=1,\n ):\n \"\"\"\n Args:\n `num_paragraphs`: number of paragraphs that will be output\n `rerank` (str): one of 'none', 'recent' or a year like '2005'. 'none' disables reranking. 'recent' retrieves more and returns the most recent ones.\n '2005' boosts the ranking of results that match 2005. 
The date of a result is determined by the year numbers it contains.\n `top_p` (float): chooses from the smallest possible set of results whose cumulative probability exceeds top_p\n Returns:\n `passages` (list): a list of passage texts (excluding the title) with the highest similarities to the `query`\n `passage_scores` (list): a list of similarity scores of each passage in `passsages` with `query`\n `passage_titles` (list): a list of passage titles\n \"\"\"\n\n # print(self.colbert_endpoint, {'query': query, 'evi_num': num_paragraphs})\n response = requests.get(\n self.colbert_endpoint,\n json={\"query\": query, \"evi_num\": num_paragraphs},\n )\n if response.status_code != 200:\n raise Exception(\"ColBERT Search API Error: %s\" % str(response))\n results = response.json()\n passages = []\n passage_titles = []\n for r in results[\"passages\"]:\n r = r.split(\"|\", maxsplit=1)\n passage_titles.append(r[0].strip())\n passages.append(r[1].strip())\n scores = results[\"passage_scores\"]\n probs = results[\"passage_probs\"]\n # print(\"probs = \", probs)\n top_p_cut_off = np.cumsum(probs) > top_p\n if not np.any(top_p_cut_off):\n # even if we include everything, we don't get to top_p\n top_p_cut_off = len(scores)\n else:\n top_p_cut_off = np.argmax(top_p_cut_off) + 1\n # print(\"top_p_cut_off = \", top_p_cut_off)\n passages, scores, passage_titles = (\n passages[:top_p_cut_off],\n scores[:top_p_cut_off],\n passage_titles[:top_p_cut_off],\n )\n\n if rerank == \"none\":\n pass\n else:\n all_passage_dates = []\n for t, p in zip(passage_titles, passages):\n passage_years = extract_year(title=t, passage=p)\n all_passage_dates.append(passage_years)\n if rerank == \"recent\":\n sort_fn = lambda x: max(\n x[3] if len(x[3]) > 0 else [0]\n ) # sort based on the latest year mentioned in the paragraph, demoting paragraphs that don't mention a year\n else:\n # rerank is a year\n try:\n query_year = int(rerank)\n except ValueError as e:\n # raise ValueError('rerank should be none, recent or an integer.')\n logger.error(e)\n return (\n passages[:num_paragraphs],\n scores[:num_paragraphs],\n passage_titles[:num_paragraphs],\n )\n sort_fn = lambda x: x[3].count(\n query_year\n ) # boost the passages that have a matching year with the query, the more they mention the date the more we boost\n\n # logger.info('Search result dates before date-based reranking: %s', str(all_passage_dates))\n passages, scores, passage_titles, all_passage_dates = list(\n zip(\n *sorted(\n zip(passages, scores, passage_titles, all_passage_dates),\n reverse=True,\n key=sort_fn,\n )\n )\n )\n # logger.info('Search result dates after date-based reranking: %s', str(all_passage_dates))\n\n # choose top num_paragraphs paragraphs\n passages, scores, passage_titles = (\n passages[:num_paragraphs],\n scores[:num_paragraphs],\n passage_titles[:num_paragraphs],\n )\n\n return passages, scores, passage_titles\n\n def _retrieve_evidences(self, claims, top_p: float = 1):\n \"\"\"\n Retrieve evidences\n Args:\n - `claims` (list): list of (claim, year)\n - `top_p` (float): chooses from the smallest possible set of results whose cumulative probability exceeds top_p\n Returns:\n - `ret_output` (dict): a dict from claim_id to a list of `evidence`\n - each `evidence` is a list of length 5: [`title of wikipedia page`, `wikipedia text`, `similarity_score`]\n \"\"\"\n ret_output = dict()\n for id, (cl, year) in enumerate(claims):\n # if self.args.reranking_method == \"none\":\n # No re-ranking on evidence. 
Reranking to match the dates increases the risk of confirmation bias.\n passages, passage_scores, passage_titles = self._colbert_retrieve(\n query=cl, num_paragraphs=self.evi_num, top_p=top_p, rerank=\"none\"\n )\n # else:\n # # retrieve more so that we can match the dates\n # passages, passage_scores, passage_titles = self._colbert_retrieve(\n # query=cl,\n # num_paragraphs=self.evi_num,\n # rerank=year,\n # num_paragraphs_for_reranking=self.evi_num * 3,\n # top_p=top_p,\n # )\n evidences = []\n for passage, score, title in zip(passages, passage_scores, passage_titles):\n evidences.append([title, passage, score])\n ret_output[id] = evidences\n\n return ret_output\n\n def _verify_claims(\n self,\n claims,\n ret_output,\n object_dlg_history,\n new_user_utterance,\n original_reply,\n do_correct: bool,\n engine_dict: dict,\n ):\n \"\"\"\n Verify claims using retrieval output\n Args:\n - `claims` (list): list of (claim, year) pairs splitted\n - `ret_output` (dict): a dict from claim_id to a list of `evidence`\n - each `evidence` is a list of length 5: [`title of wikipedia page`, `wikipedia text`, `similarity_score`]\n - `object_dlg_history`(str): list of previous DialogueTurns\n - `last_user_utterance` (str): last user utterance\n - `original_reply`(str): original LLM response\n Returns:\n - `ver_output` (list): a list of verification label (\"SUPPORTS\", \"REFUTES\", \"NOT ENOUGH INFO\") and the fixed claims\n \"\"\"\n ver_output = []\n parameter_values_list = []\n\n for claim_id, (cl, year) in enumerate(claims):\n evidences = ret_output[claim_id][: self.evi_num]\n parameter_values_list.append(\n {\n \"dlg\": object_dlg_history,\n \"last_user_utterance\": new_user_utterance,\n \"original_reply\": original_reply,\n \"claim\": cl,\n \"evidence_titles\": [e[0] for e in evidences],\n \"evidence_texts\": [e[1] for e in evidences],\n \"do_correct\": do_correct\n }\n )\n\n # when using gold evidence, we do not split claim so claim is the same with original reply\n if self.skip_verification:\n all_verification_responses = ['is \"SUPPORTS\"'] * len(claims)\n else:\n all_verification_responses = llm_generate(\n template_file=\"verify.prompt\",\n prompt_parameter_values=parameter_values_list,\n engine=engine_dict[\"default\"],\n max_tokens=200,\n temperature=0,\n stop_tokens=None,\n postprocess=False,\n )\n\n for (cl, year), verification_response in zip(\n claims, all_verification_responses\n ):\n # logger.info(\"claim: %s ; verification_response: %s\", cl, verification_response)\n # the following handles cases where smaller models like gpt-35-turbo do not follow the few-shot examples' format\n if (\n 'is \"supports\"' in verification_response.lower()\n or \"no fact-checking is needed for this claim\"\n in verification_response.lower()\n or \"the fact-checking result is not applicable to this response\"\n in verification_response.lower()\n ):\n verification_label = \"SUPPORTS\"\n fixed_claim = cl\n elif (\n 'the fact-checking result is \"not enough info\"'\n in verification_response.lower()\n ):\n verification_label = \"NOT ENOUGH INFO\"\n fixed_claim = \"\"\n else:\n verification_label = \"REFUTES\" # default set to be \"REFUTES\"\n fixed_claim = \"\"\n\n if do_correct and verification_label != \"SUPPORTS\":\n if \"You rewrite your claim:\" in verification_response:\n fixed_claim = verification_response.split(\n \"You rewrite your claim:\"\n )[-1].strip()\n else:\n logger.error(\n \"verification prompt did not fix a %s. 
Output: %s\"\n % (verification_label, verification_response)\n )\n\n ver_output.append({\"label\": verification_label, \"fixed_claim\": fixed_claim})\n\n return ver_output" }, { "identifier": "input_user", "path": "pipelines/utils.py", "snippet": "def input_user() -> str:\n try:\n user_utterance = input(bcolors.OKCYAN + bcolors.BOLD + \"User: \")\n # ignore empty inputs\n while not user_utterance.strip():\n user_utterance = input(bcolors.OKCYAN + bcolors.BOLD + \"User: \")\n finally:\n print(bcolors.ENDC)\n return user_utterance" }, { "identifier": "print_chatbot", "path": "pipelines/utils.py", "snippet": "def print_chatbot(s: str):\n print(bcolors.OKGREEN + bcolors.BOLD + s + bcolors.ENDC)" }, { "identifier": "make_parent_directories", "path": "pipelines/utils.py", "snippet": "def make_parent_directories(file_name: str):\n \"\"\"\n Creates the parent directories of `file_name` if they don't exist\n \"\"\"\n pathlib.Path(os.path.dirname(file_name)).mkdir(parents=True, exist_ok=True)" }, { "identifier": "add_pipeline_arguments", "path": "pipelines/pipeline_arguments.py", "snippet": "def add_pipeline_arguments(parser):\n # determine components of the pipeline\n parser.add_argument(\n \"--pipeline\",\n type=str,\n required=True,\n choices=[\n \"generate_and_correct\",\n \"retrieve_and_generate\",\n \"generate\",\n \"retrieve_only\",\n \"early_combine\",\n \"atlas\",\n ],\n default=\"generate_and_correct\",\n help=\"The type of pipeline used to imrpove GPT-3 response. Only used to know which modules to load.\",\n )\n parser.add_argument(\n \"--claim_prompt_template_file\",\n type=str,\n default=\"split_claims.prompt\",\n help=\"The path to the file containing the claim LLM prompt.\",\n )\n parser.add_argument(\n \"--refinement_prompt\",\n default=\"refine_w_feedback.prompt\",\n help=\"What prompt to use to refine the final response.\",\n )\n parser.add_argument(\n \"--do_refine\", action=\"store_true\", help=\"Whether to refine the final response.\"\n )\n parser.add_argument(\n \"--skip_verification\",\n action=\"store_true\",\n help=\"If True, all claims will be considered correct without fact-checking. Especially useful to speed up debugging of the other parts of the pipeline.\",\n )\n\n parser.add_argument(\n \"--fuse_claim_splitting\",\n action=\"store_true\",\n help=\"If True, The first claim splitting stage of early_combine pipeline will be fused with the generate stage. Only useful for distilled models that have been trained to do this.\",\n )\n\n parser.add_argument(\n \"--colbert_endpoint\",\n type=str,\n default=\"http://127.0.0.1:5000/search\",\n help=\"whether using colbert for retrieval.\",\n )\n parser.add_argument(\n \"--engine\",\n type=str,\n required=True,\n choices=[\"atlas\"]\n + local_model_list\n + together_model_list\n + openai_chat_model_list\n + openai_nonchat_model_list,\n help=\"The LLM engine to use.\",\n ) # choices are from the smallest to the largest model\n\n parser.add_argument(\n \"--generate_engine\",\n type=str,\n default=None,\n choices=[\"atlas\"]\n + local_model_list\n + together_model_list\n + openai_chat_model_list\n + openai_nonchat_model_list,\n help=\"The LLM engine to use for the 'generate' stage of pipelines. 
If provided, overrides --engine for that stage.\",\n ) # choices are from the smallest to the largest model\n\n parser.add_argument(\n \"--draft_engine\",\n type=str,\n default=None,\n choices=[\"atlas\"]\n + local_model_list\n + together_model_list\n + openai_chat_model_list\n + openai_nonchat_model_list,\n help=\"The LLM engine to use for the 'draft' stage of pipelines. If provided, overrides --engine for that stage.\",\n ) # choices are from the smallest to the largest model\n\n parser.add_argument(\n \"--reranking_method\",\n type=str,\n choices=[\"none\", \"date\"],\n default=\"none\",\n help=\"Only used for retrieve_and_generate pipeline\",\n )\n\n # LLM generation hyperparameters\n parser.add_argument(\n \"--max_tokens\",\n type=int,\n default=250,\n required=False,\n help=\"Only affects user-facing prompts\",\n )\n parser.add_argument(\n \"--temperature\",\n type=float,\n default=0.8,\n required=False,\n help=\"Only affects user-facing prompts\",\n )\n parser.add_argument(\n \"--top_p\",\n type=float,\n default=0.9,\n required=False,\n help=\"Only affects user-facing prompts\",\n )\n parser.add_argument(\n \"--frequency_penalty\",\n type=float,\n default=0.0,\n required=False,\n help=\"Only affects user-facing prompts\",\n )\n parser.add_argument(\n \"--presence_penalty\",\n type=float,\n default=0.0,\n required=False,\n help=\"Only affects user-facing prompts\",\n )\n\n parser.add_argument(\n \"--evi_num\",\n type=int,\n default=2,\n help=\"Number of evidences to retrieve per claim.\",\n )\n\n parser.add_argument(\n \"--retrieval_num\",\n type=int,\n default=3,\n help=\"Number of passages to retrieve when searching for information.\",\n )" }, { "identifier": "check_pipeline_arguments", "path": "pipelines/pipeline_arguments.py", "snippet": "def check_pipeline_arguments(args):\n # make sure for ATLAS, both engine and pipeline are set to 'atlas'\n if hasattr(args, \"pipeline\"):\n if (args.engine == \"atlas\" and args.pipeline != \"atlas\") or (\n args.engine != \"atlas\" and args.pipeline == \"atlas\"\n ):\n raise ValueError(\n \"When using ATLAS, both `engine` and `pipeline` input arguments should be set to 'atlas'.\"\n )" }, { "identifier": "write_prompt_logs_to_file", "path": "llm/llm_generate.py", "snippet": "def write_prompt_logs_to_file():\n with open(global_variables.prompt_log_file, \"w\") as f:\n f.write(json.dumps(global_variables.prompt_logs, indent=4, ensure_ascii=False))" }, { "identifier": "set_debug_mode", "path": "llm/global_variables.py", "snippet": "def set_debug_mode():\n global debug_prompts\n debug_prompts = True" } ]
import logging import argparse import json import readline # enables keyboard arrows when typing in the terminal from typing import List from pygments import highlight from pygments.formatters.terminal256 import Terminal256Formatter from pygments.lexers.web import JsonLexer from pipelines.dialog_turn import DialogueTurn from pipelines.chatbot import Chatbot from pipelines.utils import input_user, print_chatbot, make_parent_directories from pipelines.pipeline_arguments import ( add_pipeline_arguments, check_pipeline_arguments, ) from llm.llm_generate import write_prompt_logs_to_file from llm.global_variables import set_debug_mode
token_num: 11,642
""" Chat with the chatbot via command line """ logging.getLogger("openai").setLevel(logging.ERROR) logger = logging.getLogger(__name__) def main(args): chatbot = Chatbot(args) dlg_history: List[DialogueTurn] = [] while True: try: user_utterance = input_user() except EOFError: # stop the chatbot break # check for special commands if user_utterance in args.quit_commands: # stop the chatbot break if user_utterance in ["clear", "cls"]: # restart the dialog dlg_history = [] continue new_dlg_turn = chatbot.generate_next_turn( dlg_history, user_utterance, pipeline=args.pipeline ) dlg_history.append(new_dlg_turn) turn_log = json.dumps(new_dlg_turn.log(), indent=2, ensure_ascii=False) colorful_turn_log = highlight( turn_log, lexer=JsonLexer(), formatter=Terminal256Formatter(style="bw"), ) logger.info("Turn log: %s", colorful_turn_log) print_chatbot("Chatbot: " + new_dlg_turn.agent_utterance) make_parent_directories(args.output_file) with open(args.output_file, "a") as outfile: if len(dlg_history) == 1: # first turn outfile.write("=====\n") outfile.write("User: " + new_dlg_turn.user_utterance + "\n") outfile.write("Chatbot: " + new_dlg_turn.agent_utterance + "\n") with open(args.output_file.strip("txt") + "log", "a") as outfile: outfile.write(turn_log) outfile.write("\n") if __name__ == "__main__": # text generation arguments parser = argparse.ArgumentParser()
""" Chat with the chatbot via command line """ logging.getLogger("openai").setLevel(logging.ERROR) logger = logging.getLogger(__name__) def main(args): chatbot = Chatbot(args) dlg_history: List[DialogueTurn] = [] while True: try: user_utterance = input_user() except EOFError: # stop the chatbot break # check for special commands if user_utterance in args.quit_commands: # stop the chatbot break if user_utterance in ["clear", "cls"]: # restart the dialog dlg_history = [] continue new_dlg_turn = chatbot.generate_next_turn( dlg_history, user_utterance, pipeline=args.pipeline ) dlg_history.append(new_dlg_turn) turn_log = json.dumps(new_dlg_turn.log(), indent=2, ensure_ascii=False) colorful_turn_log = highlight( turn_log, lexer=JsonLexer(), formatter=Terminal256Formatter(style="bw"), ) logger.info("Turn log: %s", colorful_turn_log) print_chatbot("Chatbot: " + new_dlg_turn.agent_utterance) make_parent_directories(args.output_file) with open(args.output_file, "a") as outfile: if len(dlg_history) == 1: # first turn outfile.write("=====\n") outfile.write("User: " + new_dlg_turn.user_utterance + "\n") outfile.write("Chatbot: " + new_dlg_turn.agent_utterance + "\n") with open(args.output_file.strip("txt") + "log", "a") as outfile: outfile.write(turn_log) outfile.write("\n") if __name__ == "__main__": # text generation arguments parser = argparse.ArgumentParser()
next_line: add_pipeline_arguments(parser)
gold_snippet_index: 5
created_at: 2023-10-19 18:17:25+00:00
level: 16k
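The fields above (next_line, gold_snippet_index, created_at, level) close one complete data row, and the lines below begin the next row. As a readability aid only, here is a minimal sketch of how rows laid out this way might be consumed, assuming they are exported as JSON Lines with the field names used in these rows; the file name rows.jsonl and the helper iter_records are hypothetical and not part of the dataset itself.

import json

def iter_records(path="rows.jsonl"):
    # Assumption: each line of the export is one JSON object holding one row's fields.
    with open(path, encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if line:
                yield json.loads(line)

# Example usage: print the completion target and its metadata for every row.
for rec in iter_records():
    print(rec.get("repo_name"), rec.get("file_path"))
    print("  next_line:", rec.get("next_line"))
    print("  gold_snippet_index:", rec.get("gold_snippet_index"), "| level:", rec.get("level"))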
repo_name: jhejna/cpl
file_path: research/algs/off_policy_algorithm.py
[ { "identifier": "ReplayBuffer", "path": "research/datasets/replay_buffer/buffer.py", "snippet": "class ReplayBuffer(torch.utils.data.IterableDataset):\n \"\"\"\n Generic Replay Buffer Class.\n\n This class adheres to the following conventions to support multiprocessing:\n 1. Variables/functions starting with \"_\", like \"_help\" are to be used only by the replay buffer internaly. They\n are carefully setup for multiprocesing.\n 2. variables/functions named regularly without a leading \"_\" are to be used by the main thread. This includes\n standard functions like \"add\".\n\n There are a few critical setup options.\n 1. Capacity: determines if the buffer is setup upon creation. If it is set to a known value, then we can add data\n online with `add`, or by pulling more data from disk. If is set to None, the dataset is initialized to the full\n size of the offline dataset.\n 2. path: path to offline data that will be loaded\n 3. _data_generator\n\n Some options are mutually exclusive. For example, it is bad to use a non-distributed layout with\n workers and online data. This will generate a bunch of copy on writes.\n\n Data is expected to be stored in a \"next\" format. This means that data is stored like this:\n s_0, dummy, dummy, dummy\n s_1, a_0 , r_0 , d_0\n s_2, a_1 , r_1 , d_1\n s_3, a_2 , r_2 , d_2 ... End of episode!\n s_0, dummy, dummy, dummy\n s_1, a_0 , r_0 , d_0\n s_2, a_1 , r_1 , d_1\n\n This format is expected from the load(path) funciton.\n\n \"\"\"\n\n def __init__(\n self,\n observation_space: gym.Space,\n action_space: gym.Space,\n sample_fn: Union[str, Callable] = \"sample\",\n sample_kwargs: Optional[Dict] = None,\n epoch_ratio: float = 1.0,\n path: Optional[str] = None,\n capacity: Optional[int] = None,\n exclude_keys: Optional[List[str]] = None,\n include_keys: Optional[Dict] = None,\n stacked_obs: bool = False,\n stacked_action: bool = False,\n distributed: bool = False,\n fetch_every: int = 1000,\n cleanup: bool = True,\n ) -> None:\n # Remove stacking if present.\n self.stacked_obs = stacked_obs\n if self.stacked_obs:\n observation_space = remove_stack_dim(observation_space)\n self.stacked_action = stacked_action\n if self.stacked_action:\n action_space = remove_stack_dim(action_space)\n\n self.observation_space = observation_space\n self.action_space = action_space\n\n # Construct the space for the buffer\n self.exclude_keys = [] if exclude_keys is None else exclude_keys # keys to exclude in the storage buffer\n buffer_space = {\n \"obs\": self.observation_space,\n \"action\": self.action_space,\n \"reward\": 0.0,\n \"done\": False,\n \"discount\": 1.0,\n }\n flattened_buffer_space = utils.flatten_dict(buffer_space)\n if include_keys is not None:\n flattened_buffer_space.update(include_keys)\n print(\"FLATTENED BUFFER SPACE\", flattened_buffer_space)\n for k in self.exclude_keys:\n if k in flattened_buffer_space:\n del flattened_buffer_space[k]\n self.buffer_space = utils.nest_dict(flattened_buffer_space)\n\n self.dummy_action = self.action_space.sample()\n self.capacity = capacity\n\n # Setup the sampler\n if isinstance(sample_fn, str):\n sample_fn = vars(sampling)[sample_fn]\n # Use functools partial to override the default args.\n sample_kwargs = {} if sample_kwargs is None else sample_kwargs\n self.sample_fn = functools.partial(sample_fn, **sample_kwargs)\n # Add sampling parameters\n self.epoch_ratio = epoch_ratio\n\n # Path for preloaded data\n self.path = path\n\n # Setup based on distributed value\n self.distributed = distributed\n if self.distributed:\n 
self.cleanup = cleanup\n self.fetch_every = fetch_every\n if self.capacity is not None:\n self.storage_path = tempfile.mkdtemp(prefix=\"replay_buffer_\")\n print(\"[research] Replay Buffer Storage Path\", self.storage_path)\n self.current_ep = utils.nest_dict({k: list() for k in flattened_buffer_space.keys()})\n self.num_episodes = 0\n else:\n self._alloc(self.capacity) # Alloc immediately\n\n def _alloc(self, capacity):\n # Create the data generator\n self._current_data_generator = self._data_generator()\n\n if capacity is None:\n # Allocte the entire dataset\n data = utils.concatenate(*list(self._current_data_generator), dim=0)\n self._storage = storage.FixedStorage(data)\n else:\n # Construct the buffer space. Remember to exclude any exclude keys\n self._storage = storage.CircularStorage(self.buffer_space, capacity)\n # Fill the storage.\n # if self.path is not None:\n for data in self._current_data_generator:\n self._storage.extend(data)\n if self._storage.size >= self._storage.capacity:\n break\n\n print(\"[ReplayBuffer] Allocated {:.2f} GB\".format(self._storage.bytes / 1024**3))\n\n def _data_generator(self):\n \"\"\"\n Can be overridden in order to load the initial data differently.\n By default assumes the data to be the standard format, and returned as a data dictionary.\n or\n None\n\n This function can be overriden by sub-classes in order to produce data batches.\n It should do the following:\n 1. split data across torch data workers\n 2. randomize the order of data\n 3. yield data of the form dicts\n \"\"\"\n if self.path is None:\n return\n\n # By default get all of the file names that are distributed at the correct index\n worker_info = torch.utils.data.get_worker_info()\n num_workers = 1 if worker_info is None else worker_info.num_workers\n worker_id = 0 if worker_info is None else worker_info.id\n\n ep_filenames = [os.path.join(self.path, f) for f in os.listdir(self.path) if f.endswith(\".npz\")]\n random.shuffle(ep_filenames) # Shuffle all the filenames\n\n if num_workers > 1 and len(ep_filenames) == 1:\n print(\n \"[ReplayBuffer] Warning: using multiple workers but single replay file. 
Reduce memory usage by sharding\"\n \" data with `save` instead of `save_flat`.\"\n )\n elif num_workers > 1 and len(ep_filenames) < num_workers:\n print(\"[ReplayBuffer] Warning: using more workers than dataset files.\")\n\n for ep_filename in ep_filenames:\n ep_idx, _ = [int(x) for x in os.path.splitext(ep_filename)[0].split(\"_\")[-2:]]\n # Spread loaded data across workers if we have multiple workers and files.\n if ep_idx % num_workers != worker_id and len(ep_filenames) > 1:\n continue # Only yield the files belonging to this worker.\n data = storage.load_data(ep_filename, exclude_keys=self.exclude_keys)\n yield data\n\n def _fetch_offline(self) -> int:\n \"\"\"\n This simple function fetches a new episode from the offline dataset and adds it to the buffer.\n This is done for each worker.\n \"\"\"\n try:\n data = next(self._current_data_generator)\n except StopIteration:\n self._current_data_generator = self._data_generator()\n data = next(self._current_data_generator)\n self._storage.extend(data)\n # Return the fetched size\n return len(data[\"done\"]) # data must have the done key for storage\n\n def _fetch_online(self) -> int:\n worker_info = torch.utils.data.get_worker_info()\n assert worker_info is not None, \"Must use distributed buffer for online fetching.\"\n\n ep_filenames = sorted([os.path.join(self.storage_path, f) for f in os.listdir(self.storage_path)], reverse=True)\n fetched_size = 0\n for ep_filename in ep_filenames:\n ep_idx, ep_len = [int(x) for x in os.path.splitext(ep_filename)[0].split(\"_\")[-2:]]\n if ep_idx % worker_info.num_workers != worker_info.id:\n continue\n if ep_filename in self._episode_filenames:\n break # We found something we have already loaded\n if fetched_size + ep_len > self._storage.capacity:\n break # do not fetch more than the size of the replay buffer\n\n data = storage.load_data(ep_filename, exclude_keys=self.exclude_keys)\n self._storage.extend(data)\n self._episode_filenames.add(ep_filename)\n if self.cleanup:\n try:\n os.remove(ep_filename)\n except OSError:\n pass\n\n return fetched_size\n\n def _get_dummy_transition(self, obs):\n flattened_buffer_space = utils.flatten_dict(self.buffer_space)\n dummy_transition = {\n k: v.sample() if isinstance(v, gym.Space) else v\n for k, v in flattened_buffer_space.items()\n if not k.startswith(\"obs\") and not k.startswith(\"action\")\n }\n dummy_transition = utils.nest_dict(dummy_transition)\n dummy_transition[\"obs\"] = obs\n dummy_transition[\"action\"] = self.dummy_action\n return dummy_transition\n\n def _reset_current_ep(self):\n ep_idx = self.num_episodes\n ep_len = len(self.current_ep[\"done\"])\n self.num_episodes += 1\n ts = datetime.datetime.now().strftime(\"%Y%m%dT%H%M%S\")\n ep_filename = f\"{ts}_{ep_idx}_{ep_len}.npz\"\n storage.save_data(self.current_ep, os.path.join(self.storage_path, ep_filename))\n\n flattened_buffer_space = utils.flatten_dict(self.buffer_space)\n ep = {k: list() for k in flattened_buffer_space.keys()}\n self.current_ep = utils.nest_dict(ep)\n\n def add(self, **kwargs):\n assert self.capacity is not None, \"Tried to extend to a static size buffer.\"\n # Preprocess here before adding to storage\n if len(kwargs) == 1:\n assert \"obs\" in kwargs\n kwargs = self._get_dummy_transition(kwargs[\"obs\"])\n if self.stacked_obs:\n kwargs[\"obs\"] = utils.get_from_batch(kwargs[\"obs\"], -1)\n else:\n # We have a full transitions\n if self.stacked_obs:\n kwargs[\"obs\"] = utils.get_from_batch(kwargs[\"obs\"], -1)\n if self.stacked_action:\n kwargs[\"action\"] = 
utils.get_from_batch(kwargs[\"action\"], -1)\n\n assert \"done\" in kwargs, \"Need done key for ReplayBuffer\"\n\n # This function is overwritten for distributed / local buffers\n if self.distributed:\n # Add to the current thread, and dump to disk\n utils.append(self.current_ep, kwargs)\n if kwargs[\"done\"]:\n self._reset_current_ep()\n else:\n # Add directly\n self._learning_online = True\n self._storage.add(kwargs)\n\n def extend(self, **kwargs):\n assert \"done\" in kwargs, \"Need done key for ReplayBuffer\"\n assert self.capacity is not None, \"Tried to extend to a static size buffer.\"\n # TODO: There is a chance that if we add a full sequence we will end up with (B, T, stack, ...)\n # which is not what we want. We could compare the shapes of the observation space to fix it\n # but this code might be unnecesary, as this class shouldn't really be used like that anyways.\n if self.distributed:\n # Add to the current thread, and dump to disk\n utils.extend(self.current_ep, kwargs)\n if kwargs[\"done\"][-1]:\n self._reset_current_ep()\n else:\n # Add directly\n self._learning_online = True\n self._storage.extend(kwargs)\n\n def save(self, path):\n os.makedirs(path, exist_ok=True)\n if self.distributed:\n if self.cleanup:\n print(\"[research] Warning, attempting to save a cleaned up replay buffer. There are likely no files\")\n srcs = os.listdir(self.storage_path)\n for src in srcs:\n shutil.move(os.path.join(self.storage_path, src), os.path.join(path, src))\n print(\"Successfully saved\", len(srcs), \"episodes.\")\n else:\n ep_len = self._storage.size\n ep_idx = 0\n ts = datetime.datetime.now().strftime(\"%Y%m%dT%H%M%S\")\n ep_filename = f\"{ts}_{ep_idx}_{ep_len}.npz\"\n save_path = os.path.join(path, ep_filename)\n self._storage.save(save_path)\n\n def sample(self, *args, **kwargs):\n return self.sample_fn(self._storage, *args, **kwargs)\n\n def __iter__(self):\n assert not hasattr(self, \"_iterated\"), \"__iter__ called twice!\"\n self._iterated = True\n worker_info = torch.utils.data.get_worker_info()\n assert (worker_info is not None) == self.distributed, \"ReplayBuffer.distributed not set correctly!\"\n\n # allocate the buffer with the given capacity\n if self.distributed:\n self._alloc(None if self.capacity is None else self.capacity // worker_info.num_workers)\n self._episode_filenames = set()\n\n self._learning_online = False\n\n samples_since_last_offline_fetch = 0\n samples_since_last_online_fetch = 0\n last_offline_fetch_size = 0\n\n batch_size = self.sample_fn.keywords.get(\"batch_size\", 1)\n stack_size = self.sample_fn.keywords.get(\"stack\", 1)\n seq_size = self.sample_fn.keywords.get(\"seq_length\", 1)\n\n while True:\n if self._storage.size < seq_size * stack_size + 1:\n yield {} # If the buffer is too small for sampling, continue.\n else:\n sample = self.sample_fn(self._storage)\n if batch_size == 1:\n sample = utils.squeeze(sample, 0)\n yield sample\n\n # Fetch new data if we have a circular buffer.\n if isinstance(self._storage, storage.CircularStorage):\n if self.distributed: # Always check for online data\n # We fetch from the online buffer\n samples_since_last_online_fetch += 1\n if samples_since_last_online_fetch >= self.fetch_every:\n fetch_size = self._fetch_online()\n self._learning_online = self._learning_online or (fetch_size > 0)\n samples_since_last_online_fetch = 0\n\n if not self._learning_online and self.path is not None:\n # We fetch from the offline buffer\n samples_since_last_offline_fetch += 1\n data_pts_since_last_offline_fetch = (\n 
samples_since_last_offline_fetch * batch_size * seq_size * stack_size\n )\n if data_pts_since_last_offline_fetch >= last_offline_fetch_size * self.epoch_ratio:\n last_offline_fetch_size = self._fetch_offline()\n samples_since_last_offline_fetch = 0\n\n def __del__(self):\n if not self.distributed:\n return\n if self.cleanup:\n return\n else:\n paths = [os.path.join(self.storage_path, f) for f in os.listdir(self.storage_path)]\n for path in paths:\n try:\n os.remove(path)\n except OSError:\n pass\n try:\n os.rmdir(self.storage_path)\n except OSError:\n pass" }, { "identifier": "storage", "path": "research/datasets/replay_buffer/storage.py", "snippet": "def load_data(path: str, exclude_keys: Optional[List[str]]) -> Dict:\ndef save_data(data: Dict, path: str) -> None:\ndef get_bytes(buffer: Union[Dict, np.ndarray]) -> int:\n def capacity(self):\n def size(self):\n def starts(self):\n def ends(self):\n def lengths(self):\n def bytes(self):\n def save(self, path):\n def __getitem__(self, key):\n def __getattr__(self, name):\n def __contains__(self, key):\n def add(self, data):\n def extend(self, data):\n def __init__(self, buffers: Dict) -> None:\n def add(self, data):\n def extend(self, data):\n def __init__(self, initial_capacity: int = 100, dtype=np.int64):\n def _reset(self):\n def append(self, value):\n def pop(self):\n def popleft(self):\n def view(self):\n def __len__(self):\n def first(self):\n def last(self):\n def __str__(self):\n def __init__(self, buffer_space: Union[Dict, gym.spaces.Dict], capacity: Optional[int] = None) -> None:\n def _update_markers(self, new_ends: Iterable = ()):\n def add(self, data):\n def extend(self, data):\nclass Storage(abc.ABC):\nclass FixedStorage(Storage):\nclass NPQueue(object):\nclass CircularStorage(Storage):" }, { "identifier": "EmptyEnv", "path": "research/envs/base.py", "snippet": "class EmptyEnv(gym.Env):\n\n \"\"\"\n An empty holder for defining supervised learning problems\n It works by specifying the ranges and shapes.\n \"\"\"\n\n def __init__(\n self,\n observation_low=None,\n observation_high=None,\n observation_shape=None,\n observation_dtype=np.float32,\n observation_space=None,\n action_low=None,\n action_high=None,\n action_shape=None,\n action_dtype=np.float32,\n action_space=None,\n ):\n if observation_space is not None:\n self.observation_space = observation_space\n else:\n self.observation_space = _get_space(observation_low, observation_high, observation_shape, observation_dtype)\n if action_space is not None:\n self.action_space = action_space\n else:\n self.action_space = _get_space(action_low, action_high, action_shape, action_dtype)\n\n def step(self, action):\n raise NotImplementedError(\"Empty Env does not have step\")\n\n def reset(self, **kwargs):\n raise NotImplementedError(\"Empty Env does not have reset\")" }, { "identifier": "ModuleContainer", "path": "research/networks/base.py", "snippet": "class ModuleContainer(torch.nn.Module):\n CONTAINERS = []\n\n def __init__(self, observation_space: gym.Space, action_space: gym.Space, **kwargs) -> None:\n super().__init__()\n # save the classes and containers\n base_kwargs = {k: v for k, v in kwargs.items() if not k.endswith(\"_class\") and not k.endswith(\"_kwargs\")}\n\n output_space = observation_space\n for container in self.CONTAINERS:\n module_class = kwargs.get(container + \"_class\", torch.nn.Identity)\n module_class = vars(research.networks)[module_class] if isinstance(module_class, str) else module_class\n if module_class is torch.nn.Identity:\n module_kwargs = dict()\n 
else:\n module_kwargs = base_kwargs.copy()\n module_kwargs.update(kwargs.get(container + \"_kwargs\", dict()))\n # Create the module, and attach it to self\n module = module_class(output_space, action_space, **module_kwargs)\n setattr(self, container, module)\n\n # Set a reset function\n setattr(self, \"reset_\" + container, partial(self._reset, container))\n\n if hasattr(getattr(self, container), \"output_space\"):\n # update the output space\n output_space = getattr(self, container).output_space\n\n # Done creating all sub-modules.\n\n @classmethod\n def create_subset(cls, containers):\n assert all([container in cls.CONTAINERS for container in containers])\n name = \"\".join([container.capitalize() for container in containers]) + \"Subset\"\n return type(name, (ModuleContainer,), {\"CONTAINERS\": containers})\n\n def _reset(self, container: str) -> None:\n module = getattr(self, container)\n with torch.no_grad():\n module.apply(reset)\n\n def compile(self, **kwargs):\n for container in self.CONTAINERS:\n attr = getattr(self, container)\n if type(attr).forward == torch.nn.Module.forward:\n assert hasattr(attr, \"compile\"), (\n \"container \" + container + \" is nn.Module without forward() but didn't define `compile`.\"\n )\n attr.compile(**kwargs)\n else:\n setattr(self, container, torch.compile(attr, **kwargs))\n\n def forward(self, x):\n # Use all of the modules in order\n for container in self.CONTAINERS:\n x = getattr(self, container)(x)\n return x" }, { "identifier": "runners", "path": "research/utils/runners.py", "snippet": "class CloudpickleWrapper:\nclass AsyncState(Enum):\nclass AsyncEnv(gym.Env):\nclass MPRunner(object):\n def __init__(self, fn: Callable):\n def __getstate__(self):\n def __setstate__(self, ob):\n def __call__(self):\ndef alloc_shared_buffer(space: Any):\ndef read_shared_buffer(shared_buffer: Any, space: gym.Space):\ndef write_shared_buffer(shared_buffer: Any, space: gym.Space, value: Any):\n def __init__(\n self, env_fn: Callable, observation_space: Optional[gym.Space] = None, action_space: Optional[gym.Space] = None\n ):\n def step_send(self, action):\n def step_recv(self):\n def step(self, action):\n def reset_send(self):\n def reset_recv(self):\n def reset(self):\n def close(self):\ndef _async_env_worker(env_fn, pipe, parent_pipe, obs_buffer, action_buffer):\n def __init__(\n self,\n env_fn,\n fn: Optional[Callable] = None,\n observation_space: Optional[gym.Space] = None,\n action_space: Optional[gym.Space] = None,\n **kwargs,\n ):\n def start(self, fn: Optional[Callable] = None, **kwargs):\n def started(self):\n def __call__(self, block=False):\n def step(self, *args, **kwargs):\n def reset(self, *args, **kwargs):\n def close(self):\n DEFAULT = \"default\"\n WAITING_RESET = \"reset\"\n WAITING_STEP = \"step\"" }, { "identifier": "utils", "path": "research/utils/utils.py", "snippet": "def to_device(batch: Any, device: torch.device) -> Any:\ndef to_tensor(batch: Any) -> Any:\ndef to_np(batch: Any) -> Any:\ndef remove_float64(batch: Any):\ndef unsqueeze(batch: Any, dim: int) -> Any:\ndef squeeze(batch: Any, dim: int) -> Any:\ndef get_from_batch(batch: Any, start: Union[int, np.ndarray, torch.Tensor], end: Optional[int] = None) -> Any:\ndef set_in_batch(batch: Any, value: Any, start: int, end: Optional[int] = None) -> None:\ndef batch_copy(batch: Any) -> Any:\ndef space_copy(space: gym.Space):\ndef contains_tensors(batch: Any) -> bool:\ndef get_device(batch: Any) -> Optional[torch.device]:\ndef concatenate(*args, dim: int = 0):\ndef append(lst, item):\ndef 
extend(lst1, lst2):\n def __init__(self, name: str = \"\"):\n def forward(self, x: Any) -> Any:\ndef np_dataset_alloc(\n space: gym.Space, capacity: int, begin_pad: Tuple[int] = tuple(), end_pad: Tuple[int] = tuple()\n) -> np.ndarray:\ndef np_bytes_per_instance(space: gym.Space) -> int:\ndef _flatten_dict_helper(flat_dict: Dict, value: Any, prefix: str, separator: str = \".\") -> None:\ndef flatten_dict(d: Dict, separator: str = \".\") -> Dict:\ndef nest_dict(d: Dict, separator: str = \".\") -> Dict:\ndef fetch_from_dict(d: Dict, keys: Union[str, List, Tuple], separator=\".\") -> List[Any]:\ndef create_optim_groups(params, kwargs):\nclass PrintNode(torch.nn.Module):" }, { "identifier": "Algorithm", "path": "research/algs/base.py", "snippet": "class Algorithm(ABC):\n _save_keys: Set[str]\n _compiled: bool\n\n def __init__(\n self,\n observation_space: gym.Space,\n action_space: gym.Space,\n network_class: Type[torch.nn.Module],\n dataset_class: Union[Type[torch.utils.data.IterableDataset], Type[torch.utils.data.Dataset]],\n network_kwargs: Optional[Dict] = None,\n dataset_kwargs: Optional[Dict] = None,\n validation_dataset_class: Optional[\n Union[Type[torch.utils.data.IterableDataset], Type[torch.utils.data.Dataset]]\n ] = None,\n validation_dataset_kwargs: Optional[Dict] = None,\n optim_class: Type[torch.optim.Optimizer] = torch.optim.Adam,\n optim_kwargs: Optional[Dict] = None,\n schedulers_class: Optional[Dict] = None,\n schedulers_kwargs: Optional[Dict[str, Dict]] = None,\n processor_class: Optional[Type[Processor]] = None,\n processor_kwargs: Optional[Dict] = None,\n checkpoint: Optional[str] = None,\n device: Union[str, torch.device] = \"auto\",\n ):\n # Initialize the _save_keys attribute using the superclass.\n # These are used for automatically identifying keys for saving/loading.\n super().__setattr__(\"_save_keys\", set())\n super().__setattr__(\"_module_keys\", set())\n super().__setattr__(\"_compiled\", False)\n\n # Save relevant values\n self.observation_space = observation_space\n self.action_space = action_space\n self.optim = {}\n\n # setup devices\n if device == \"auto\":\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n self._device = torch.device(device)\n\n # Setup the data preprocessor first. 
Thus, if we need to reference it in network setup we can.\n # Everything here is saved in self.processor\n self.setup_processor(processor_class, {} if processor_kwargs is None else processor_kwargs)\n\n # Create the network.\n network_kwargs = {} if network_kwargs is None else network_kwargs\n self.setup_network(network_class, network_kwargs)\n\n # Save values for optimizers, which will be lazily initialized later\n self.optim = {}\n self.optim_class = optim_class\n self.optim_kwargs = {\"lr\": 0.0001} if optim_kwargs is None else optim_kwargs\n\n # Save values for schedulers, which will be lazily initialized later\n self.schedulers = {}\n self.schedulers_class = {} if schedulers_class is None else schedulers_class\n self.schedulers_kwargs = {} if schedulers_kwargs is None else schedulers_kwargs\n\n # Save values for datasets, which will be lazily initialized later\n self.dataset_class = dataset_class\n self.dataset_kwargs = {} if dataset_kwargs is None else dataset_kwargs\n self.validation_dataset_class = validation_dataset_class\n self.validation_dataset_kwargs = validation_dataset_kwargs\n\n self._training = False\n\n # Load a check point if we have one -- using non-strict enforcement.\n # NOTE: this only loads the network and will _not_ load the optimizer checkpoint.\n if checkpoint is not None:\n self.load(checkpoint, strict=False)\n\n @property\n def device(self):\n return self._device\n\n @property\n def training(self) -> bool:\n return self._training\n\n def __setattr__(self, name: str, value: Any) -> None:\n # Check to see if the value is a module etc.\n if (hasattr(self, \"_save_keys\") and name in self._save_keys) or (\n hasattr(self, \"_module_keys\") and name in self._module_keys\n ):\n pass\n elif isinstance(value, torch.nn.Parameter):\n self._save_keys.add(name)\n elif isinstance(value, torch.nn.Module):\n self._module_keys.add(name)\n if sum(p.numel() for p in value.parameters()) > 0:\n self._save_keys.add(name) # store if we have a module with more than zero parameters.\n return super().__setattr__(name, value)\n\n @property\n def save_keys(self) -> List[str]:\n return self._save_keys\n\n @property\n def module_keys(self) -> List[str]:\n return self._module_keys\n\n @property\n def compiled(self) -> bool:\n return self._compiled\n\n def to(self, device) -> \"Algorithm\":\n for k in self.save_keys:\n if k == \"processor\" and not self.processor.supports_gpu:\n continue\n else:\n setattr(self, k, getattr(self, k).to(device))\n return self\n\n def compile(self, **kwargs):\n for k in self.save_keys:\n attr = getattr(self, k)\n if isinstance(attr, torch.nn.Module):\n if type(attr).forward == torch.nn.Module.forward:\n # In this case, the forward method hasn't been overriden.\n # Thus we assume there is a compile argument.\n assert hasattr(attr, \"compile\"), (\n \"save key \" + k + \" is nn.Module without forward() but didn't define `compile`.\"\n )\n attr.compile(**kwargs)\n else:\n setattr(self, k, torch.compile(attr, **kwargs))\n # indicate that we have compiled the models.\n self._compiled = True\n\n def train(self) -> None:\n for k in self._module_keys:\n getattr(self, k).train()\n self._training = True\n\n def eval(self) -> None:\n for k in self._module_keys:\n getattr(self, k).eval()\n self._training = False\n\n @property\n def num_params(self):\n _num_params = 0\n for k in self.save_keys:\n attr = getattr(self, k)\n if hasattr(attr, \"parameters\"):\n _num_params += sum(p.numel() for p in attr.parameters() if p.requires_grad)\n else:\n assert isinstance(attr, 
torch.nn.Parameter), \"Can only save Modules or Parameters.\"\n if attr.requires_grad:\n _num_params += attr.numel()\n return _num_params\n\n @property\n def nbytes(self):\n # Returns the size of all the parameters in bytes\n _bytes = 0\n for k in self.save_keys:\n attr = getattr(self, k)\n if hasattr(attr, \"parameters\"):\n for p in attr.parameters():\n _bytes += p.nelement() * p.element_size()\n if hasattr(attr, \"buffers\"):\n for b in attr.buffers():\n _bytes += b.nelement() * b.element_size()\n return _bytes\n\n def setup_processor(self, processor_class: Optional[Type[Processor]], processor_kwargs: Dict) -> None:\n if processor_class is None:\n processor = Identity(self.observation_space, self.action_space)\n else:\n processor = processor_class(self.observation_space, self.action_space, **processor_kwargs)\n\n if processor.supports_gpu: # move it to device if it supports GPU computation.\n self.processor = processor.to(self.device)\n else:\n self.processor = processor\n\n def setup_network(self, network_class: Type[torch.nn.Module], network_kwargs: Dict) -> None:\n self.network = network_class(\n self.processor.observation_space, self.processor.action_space, **network_kwargs\n ).to(self.device)\n\n def setup_optimizers(self) -> None:\n \"\"\"\n This is only called by the Trainer, and not called when we load the model.\n This is done so that inference jobs don't load the optimizer state.\n \"\"\"\n # Setup Optimizers\n assert len(self.optim) == 0, \"setup_optimizers called twice!\"\n for k in self.save_keys:\n attr = getattr(self, k)\n if hasattr(attr, \"parameters\"):\n parameters = attr.parameters()\n else:\n assert isinstance(attr, torch.nn.Parameter), \"Can only save Modules or Parameters.\"\n parameters = [attr]\n # Constrcut the optimizer\n self.optim[k] = self.optim_class(parameters, **self.optim_kwargs)\n\n def setup_schedulers(self):\n assert len(self.schedulers) == 0, \"setup_schedulers called twice!\"\n for k in self.schedulers_class.keys():\n if self.schedulers_class[k] is not None:\n assert k in self.optim, \"Did not find schedule key in optimizers dict.\"\n self.schedulers[k] = self.schedulers_class[k](self.optim[k], **self.schedulers_kwargs.get(k, dict()))\n\n def setup_datasets(self, env: gym.Env, total_steps: int):\n \"\"\"\n Called after everything else has been setup, right before training starts\n This is _only_ called by the trainer and is not called by default.\n This function is responsible for creating the following attributes:\n self.dataset (required)\n self.validation_dataset\n \"\"\"\n assert not hasattr(self, \"dataset\"), \"setup_datasets called twice!\"\n assert not hasattr(self, \"validation_dataset\"), \"setup_datasets called twice!\"\n # Setup the train dataset\n self.dataset = self.dataset_class(self.observation_space, self.action_space, **self.dataset_kwargs)\n # Setup the validation dataset\n if self.validation_dataset_class is not None:\n self.validation_dataset = self.validation_dataset_class(\n self.observation_space, self.action_space, **self.validation_dataset_kwargs\n )\n elif self.validation_dataset_kwargs is not None:\n validation_dataset_kwargs = copy.deepcopy(self.dataset_kwargs)\n validation_dataset_kwargs.update(self.validation_dataset_kwargs)\n self.validation_dataset = self.dataset_class(\n self.observation_space, self.action_space, **validation_dataset_kwargs\n )\n else:\n self.validation_dataset = None\n\n def save(self, path: str, extension: str, metadata: Optional[Dict] = None) -> None:\n \"\"\"\n Saves a checkpoint of the model 
and the optimizers\n \"\"\"\n save_dict = {}\n if len(self.optim) > 0:\n save_dict[\"optim\"] = {k: v.state_dict() for k, v in self.optim.items()}\n if len(self.schedulers) > 0:\n save_dict[\"schedulers\"] = {k: v.state_dict() for k, v in self.schedulers.items()}\n for k in self._save_keys:\n attr = getattr(self, k)\n if hasattr(attr, \"state_dict\"):\n save_dict[k] = attr.state_dict()\n else:\n assert isinstance(attr, torch.nn.Parameter), \"Can only save Modules or Parameters.\"\n save_dict[k] = attr\n\n # Add the metadata\n save_dict[\"metadata\"] = {} if metadata is None else metadata\n save_path = os.path.join(path, extension)\n if not save_path.endswith(\".pt\"):\n save_path += \".pt\"\n torch.save(save_dict, save_path)\n\n def load(self, checkpoint: str, strict: bool = True) -> Dict:\n \"\"\"\n Loads the model and its associated checkpoints.\n If we haven't created the optimizers and schedulers, do not load those.\n \"\"\"\n print(\"[research] loading checkpoint:\", checkpoint)\n checkpoint = torch.load(checkpoint, map_location=self.device)\n remaining_checkpoint_keys = set(checkpoint.keys())\n\n # First load everything except for the optim\n for k in self.save_keys: # Loop through keys in the Algorithm.\n if k not in checkpoint:\n if strict:\n raise ValueError(\"Checkpoint did not have key \" + str(k))\n else:\n print(\"[research] Warning: Checkpoint did not have key\", k)\n continue\n\n if isinstance(getattr(self, k), torch.nn.Parameter):\n # directly set the data, this is for nn.Parameters\n getattr(self, k).data = checkpoint[k].data\n else:\n # Otherwise, load via state dict\n getattr(self, k).load_state_dict(checkpoint[k], strict=strict)\n remaining_checkpoint_keys.remove(k)\n\n # Now load the optimizer and its associated keys\n for k in self.optim.keys():\n if strict and k not in checkpoint[\"optim\"]:\n raise ValueError(\"Strict mode was enabled, but couldn't find optimizer key\")\n elif k not in checkpoint[\"optim\"]:\n print(\"[research] Warning: Checkpoint did not have optimizer key\", k)\n continue\n self.optim[k].load_state_dict(checkpoint[\"optim\"][k])\n if \"optim\" in checkpoint:\n remaining_checkpoint_keys.remove(\"optim\")\n\n # Now load the schedulers\n for k in self.schedulers.keys():\n if strict and k not in checkpoint[\"schedulers\"]:\n raise ValueError(\"Strict mode was enabled, but couldn't find scheduler key\")\n elif k not in checkpoint[\"schedulers\"]:\n print(\"[research] Warning: Checkpoint did not have scheduler key\", k)\n continue\n self.schedulers[k].load_state_dict(checkpoint[\"schedulers\"][k])\n if \"schedulers\" in checkpoint:\n remaining_checkpoint_keys.remove(\"schedulers\")\n\n remaining_checkpoint_keys.remove(\"metadata\") # Do not count metadata key, which is always addded.\n if strict and len(remaining_checkpoint_keys) > 0:\n raise ValueError(\"Algorithm did not have keys \", +str(remaining_checkpoint_keys))\n elif len(remaining_checkpoint_keys) > 0:\n print(\"[research] Warning: Checkpoint keys\", remaining_checkpoint_keys, \"were not loaded.\")\n\n return checkpoint[\"metadata\"]\n\n def format_batch(self, batch: Any) -> Any:\n # Convert items to tensor if they are not.\n # Checking first makes sure we do not distrub memory pinning\n if not utils.contains_tensors(batch):\n batch = utils.to_tensor(batch)\n if self.processor.supports_gpu:\n # Move to CUDA first.\n batch = utils.to_device(batch, self.device)\n batch = self.processor(batch)\n else:\n batch = self.processor(batch)\n batch = utils.to_device(batch, self.device)\n return batch\n\n 
@abstractmethod\n def train_step(self, batch: Any, step: int, total_steps: int) -> Dict:\n \"\"\"\n Train the model. Should return a dict of loggable values\n \"\"\"\n return {}\n\n def validation_step(self, batch: Any) -> Dict:\n \"\"\"\n perform a validation step. Should return a dict of loggable values.\n \"\"\"\n raise NotImplementedError\n\n def env_step(self, env: gym.Env, step: int, total_steps: int) -> Dict:\n \"\"\"\n Perform any extra training operations. This is done before the train step is called.\n A common use case for this would be stepping the environment etc.\n \"\"\"\n return {}\n\n def validation_extras(self, path: str, step: int) -> Dict:\n \"\"\"\n Perform any extra validation operations.\n A common usecase for this is saving visualizations etc.\n \"\"\"\n return {}\n\n def _predict(self, batch: Any, **kwargs) -> Any:\n \"\"\"\n Internal prediction function, can be overridden\n By default, we call torch.no_grad(). If this behavior isn't desired,\n override the _predict funciton in your algorithm.\n \"\"\"\n with torch.no_grad():\n if len(kwargs) > 0:\n raise ValueError(\"Default predict method does not accept key word args, but they were provided.\")\n pred = self.network(batch)\n return pred\n\n def predict(self, batch: Any, is_batched: bool = False, **kwargs) -> Any:\n is_np = not utils.contains_tensors(batch)\n if not is_batched:\n # Unsqeeuze everything\n batch = utils.unsqueeze(batch, 0)\n batch = self.format_batch(batch)\n pred = self._predict(batch, **kwargs)\n if not is_batched:\n pred = utils.get_from_batch(pred, 0)\n if is_np:\n pred = utils.to_np(pred)\n return pred" } ]
import datetime import functools import os import sys import tempfile import gym import numpy as np import torch from abc import abstractmethod from typing import Any, Dict, Optional, Union from research.datasets import ReplayBuffer from research.datasets.replay_buffer import storage from research.envs.base import EmptyEnv from research.networks.base import ModuleContainer from research.utils import runners, utils from .base import Algorithm from research.utils.config import Config
token_num: 11,302
self.dataset.add(obs=self._current_obs) # add the first observation. self.env_step = self._env_step else: raise ValueError("Invalid env passed") def _empty_step(self, env: gym.Env, step: int, total_steps: int) -> Dict: return dict() def _env_step(self, env: gym.Env, step: int, total_steps: int) -> Dict: # Return if env is Empty or we we aren't at every env_freq steps if step <= self.offline_steps: # Purposefully set to nan so we write CSV log. return dict(steps=self._env_steps, reward=-np.inf, length=np.inf, num_ep=self._num_ep) if step < self.random_steps: action = env.action_space.sample() else: self.eval() action = self._get_train_action(self._current_obs, step, total_steps) self.train() if isinstance(env.action_space, gym.spaces.Box): action = np.clip(action, env.action_space.low, env.action_space.high) next_obs, reward, done, info = env.step(action) self._env_steps += 1 self._episode_length += 1 self._episode_reward += reward if "discount" in info: discount = info["discount"] elif hasattr(env, "_max_episode_steps") and self._episode_length == env._max_episode_steps: discount = 1.0 else: discount = 1 - float(done) # Store the consequences. self.dataset.add(obs=next_obs, action=action, reward=reward, done=done, discount=discount) if done: self._num_ep += 1 # Compute metrics metrics = dict( steps=self._env_steps, reward=self._episode_reward, length=self._episode_length, num_ep=self._num_ep ) # Reset the environment self._current_obs = env.reset() self.dataset.add(obs=self._current_obs) # Add the first timestep self._episode_length = 0 self._episode_reward = 0 return metrics else: self._current_obs = next_obs return dict(steps=self._env_steps) def _async_env_step(self, env: gym.Env, step: int, total_steps: int) -> Dict: # Recieve Data from the last step and add to buffer. Should only call recv! if self._resetting: self._current_obs = env.reset_recv() self._num_ep += 1 self._episode_length = 0 self._episode_reward = 0 self.dataset.add(obs=self._current_obs) self._resetting = False done = False else: self._current_obs, reward, done, info = env.step_recv() self._env_steps += 1 self._episode_length += 1 self._episode_reward += reward self.dataset.add( obs=self._current_obs, action=self._current_action, reward=reward, done=done, discount=info["discount"] ) # Send data for the next step and return metrics. Should only call send! if done: # If the episode terminated, then we need to reset and send the reset message self._resetting = True env.reset_send() return dict( steps=self._env_steps, reward=self._episode_reward, length=self._episode_length, num_ep=self._num_ep ) else: # Otherwise, compute the action we should take and send it. self._resetting = False if step < self.random_steps: self._current_action = env.action_space.sample() else: self.eval() self._current_action = self._get_train_action(self._current_obs, step, total_steps) self.train() if isinstance(env.action_space, gym.spaces.Box): self._current_action = np.clip(self._current_action, env.action_space.low, env.action_space.high) env.step_send(self._current_action) return dict(steps=self._env_steps) def _runner_env_step(self, env: gym.Env, step: int, total_steps: int) -> Dict: # All we do is check the pipe to see if there is data! metrics = env() if len(metrics) > 0: # If the metrics are non-empty, then it means that we have completed an episode. 
# As such, decrement the counter self._eps_since_last_checkpoint += 1 if self._eps_since_last_checkpoint == self.async_runner_ep_lag: self.save(self._checkpoint_dir, str(step), dict(step=step)) self._eps_since_last_checkpoint = 0 return metrics @abstractmethod def _get_train_action(self, obs: Any, step: int, total_steps: int) -> np.ndarray: raise NotImplementedError @functools.cached_property def action_range(self): action_range = (self.processor.action_space.low, self.processor.action_space.high) return utils.to_device(utils.to_tensor(action_range), self.device) def _predict( self, batch: Dict, sample: bool = False, noise: float = 0.0, noise_clip: Optional[float] = None, temperature=1.0 ) -> torch.Tensor: with torch.no_grad():
class OffPolicyAlgorithm(Algorithm): def __init__( self, *args, offline_steps: int = 0, # Run fully offline by setting to -1 random_steps: int = 1000, async_runner_ep_lag: int = 1, **kwargs, ): super().__init__(*args, **kwargs) self.offline_steps = offline_steps self.random_steps = random_steps self.async_runner_ep_lag = async_runner_ep_lag def setup_datasets(self, env: gym.Env, total_steps: int): super().setup_datasets(env, total_steps) # Assign the correct update function based on what is passed in. if env is None or isinstance(env, EmptyEnv) or self.offline_steps < 0: self.env_step = self._empty_step elif isinstance(env, runners.AsyncEnv): self._episode_reward = 0 self._episode_length = 0 self._num_ep = 0 self._env_steps = 0 self._resetting = True env.reset_send() # Ask the env to start resetting. self.env_step = self._async_env_step elif isinstance(env, runners.MPRunner): assert isinstance(self.dataset, ReplayBuffer), "must use replaybuffer for MP RUnner." assert self.dataset.distributed, "ReplayBuffer must be distributed for use with Fully MPRunner." # Launch the runner subprocess. self._eps_since_last_checkpoint = 0 self._checkpoint_dir = tempfile.mkdtemp(prefix="checkpoints_") assert self.offline_steps <= 0, "MPRunner does not currently support offline to online." env.start( fn=_off_policy_collector_subprocess, checkpoint_path=self._checkpoint_dir, storage_path=self.dataset.storage_path, random_steps=self.random_steps, exclude_keys=self.dataset.exclude_keys, total_steps=total_steps, ) self.env_step = self._runner_env_step elif isinstance(env, gym.Env): # Setup Env Metrics self._current_obs = env.reset() self._episode_reward = 0 self._episode_length = 0 self._num_ep = 0 self._env_steps = 0 # Note that currently the very first (s, a) pair is thrown away because # we don't add to the dataset here. # This was done for better compatibility for offline to online learning. self.dataset.add(obs=self._current_obs) # add the first observation. self.env_step = self._env_step else: raise ValueError("Invalid env passed") def _empty_step(self, env: gym.Env, step: int, total_steps: int) -> Dict: return dict() def _env_step(self, env: gym.Env, step: int, total_steps: int) -> Dict: # Return if env is Empty or we we aren't at every env_freq steps if step <= self.offline_steps: # Purposefully set to nan so we write CSV log. return dict(steps=self._env_steps, reward=-np.inf, length=np.inf, num_ep=self._num_ep) if step < self.random_steps: action = env.action_space.sample() else: self.eval() action = self._get_train_action(self._current_obs, step, total_steps) self.train() if isinstance(env.action_space, gym.spaces.Box): action = np.clip(action, env.action_space.low, env.action_space.high) next_obs, reward, done, info = env.step(action) self._env_steps += 1 self._episode_length += 1 self._episode_reward += reward if "discount" in info: discount = info["discount"] elif hasattr(env, "_max_episode_steps") and self._episode_length == env._max_episode_steps: discount = 1.0 else: discount = 1 - float(done) # Store the consequences. 
self.dataset.add(obs=next_obs, action=action, reward=reward, done=done, discount=discount) if done: self._num_ep += 1 # Compute metrics metrics = dict( steps=self._env_steps, reward=self._episode_reward, length=self._episode_length, num_ep=self._num_ep ) # Reset the environment self._current_obs = env.reset() self.dataset.add(obs=self._current_obs) # Add the first timestep self._episode_length = 0 self._episode_reward = 0 return metrics else: self._current_obs = next_obs return dict(steps=self._env_steps) def _async_env_step(self, env: gym.Env, step: int, total_steps: int) -> Dict: # Recieve Data from the last step and add to buffer. Should only call recv! if self._resetting: self._current_obs = env.reset_recv() self._num_ep += 1 self._episode_length = 0 self._episode_reward = 0 self.dataset.add(obs=self._current_obs) self._resetting = False done = False else: self._current_obs, reward, done, info = env.step_recv() self._env_steps += 1 self._episode_length += 1 self._episode_reward += reward self.dataset.add( obs=self._current_obs, action=self._current_action, reward=reward, done=done, discount=info["discount"] ) # Send data for the next step and return metrics. Should only call send! if done: # If the episode terminated, then we need to reset and send the reset message self._resetting = True env.reset_send() return dict( steps=self._env_steps, reward=self._episode_reward, length=self._episode_length, num_ep=self._num_ep ) else: # Otherwise, compute the action we should take and send it. self._resetting = False if step < self.random_steps: self._current_action = env.action_space.sample() else: self.eval() self._current_action = self._get_train_action(self._current_obs, step, total_steps) self.train() if isinstance(env.action_space, gym.spaces.Box): self._current_action = np.clip(self._current_action, env.action_space.low, env.action_space.high) env.step_send(self._current_action) return dict(steps=self._env_steps) def _runner_env_step(self, env: gym.Env, step: int, total_steps: int) -> Dict: # All we do is check the pipe to see if there is data! metrics = env() if len(metrics) > 0: # If the metrics are non-empty, then it means that we have completed an episode. # As such, decrement the counter self._eps_since_last_checkpoint += 1 if self._eps_since_last_checkpoint == self.async_runner_ep_lag: self.save(self._checkpoint_dir, str(step), dict(step=step)) self._eps_since_last_checkpoint = 0 return metrics @abstractmethod def _get_train_action(self, obs: Any, step: int, total_steps: int) -> np.ndarray: raise NotImplementedError @functools.cached_property def action_range(self): action_range = (self.processor.action_space.low, self.processor.action_space.high) return utils.to_device(utils.to_tensor(action_range), self.device) def _predict( self, batch: Dict, sample: bool = False, noise: float = 0.0, noise_clip: Optional[float] = None, temperature=1.0 ) -> torch.Tensor: with torch.no_grad():
if isinstance(self.network, ModuleContainer) and "encoder" in self.network.CONTAINERS:
3
2023-10-19 17:25:45+00:00
16k
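The `_env_step` logic in the row above separates episode termination from value bootstrapping: a `discount` reported by the environment is used directly, an episode cut off only by a time limit (`_max_episode_steps`) keeps a discount of 1.0 so the Bellman target still bootstraps, and only a true termination zeroes the discount. A minimal standalone sketch of that rule, with an illustrative function name and signature that are not part of the original code:

def infer_discount(done: bool, episode_length: int, info: dict, max_episode_steps=None) -> float:
    """Discount stored with a replay transition, mirroring the time-limit handling above."""
    if "discount" in info:
        # The environment (e.g. a dm_control-style wrapper) supplies its own discount.
        return float(info["discount"])
    if max_episode_steps is not None and episode_length == max_episode_steps:
        # Episode ended only because the time limit was hit: keep bootstrapping.
        return 1.0
    # Genuine terminal state: stop bootstrapping past it.
    return 1.0 - float(done)

Under this convention `done` only marks where episodes reset, while `discount` alone controls whether the next state's value enters the target, which is why the buffer stores both fields.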
bcmi/libcom
libcom/painterly_image_harmonization/source/PHDiffusion/ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "libcom/painterly_image_harmonization/source/PHDiffusion/ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('assets/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "libcom/painterly_image_harmonization/source/PHDiffusion/ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "default", "path": "libcom/painterly_image_harmonization/source/PHDiffusion/ldm/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "ismap", "path": "libcom/painterly_image_harmonization/source/PHDiffusion/ldm/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "isimage", "path": "libcom/painterly_image_harmonization/source/PHDiffusion/ldm/util.py", "snippet": "def isimage(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "mean_flat", "path": "libcom/painterly_image_harmonization/source/PHDiffusion/ldm/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" }, { "identifier": "count_params", "path": "libcom/painterly_image_harmonization/source/PHDiffusion/ldm/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params * 1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "instantiate_from_config", "path": "libcom/painterly_image_harmonization/source/PHDiffusion/ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "LitEma", "path": "libcom/painterly_image_harmonization/source/PHDiffusion/ldm/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int) if use_num_upates\n else torch.tensor(-1, dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n # remove as '.'-character is not 
allowed in buffers\n s_name = name.replace('.', '')\n self.m_name2s_name.update({name: s_name})\n self.register_buffer(s_name, p.clone().detach().data)\n\n self.collected_params = []\n\n def reset_num_updates(self):\n del self.num_updates\n self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int))\n\n def forward(self, model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "normal_kl", "path": "libcom/painterly_image_harmonization/source/PHDiffusion/ldm/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. 
Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "DiagonalGaussianDistribution", "path": "libcom/painterly_image_harmonization/source/PHDiffusion/ldm/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean" }, { "identifier": "IdentityFirstStage", "path": "libcom/painterly_image_harmonization/source/PHDiffusion/ldm/models/autoencoder.py", "snippet": "class IdentityFirstStage(nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "AutoencoderKL", "path": "libcom/painterly_image_harmonization/source/PHDiffusion/ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ema_decay=None,\n learn_logvar=False\n ):\n super().__init__()\n self.learn_logvar = learn_logvar\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2*ddconfig[\"z_channels\"], 2*embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels)==int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n\n self.use_ema = ema_decay is not None\n if self.use_ema:\n self.ema_decay = ema_decay\n assert 0. 
< ema_decay < 1.\n self.model_ema = LitEma(self, decay=ema_decay)\n print(f\"Keeping EMAs of {len(list(self.model_ema.buffers()))}.\")\n\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n @contextmanager\n def ema_scope(self, context=None):\n if self.use_ema:\n self.model_ema.store(self.parameters())\n self.model_ema.copy_to(self)\n if context is not None:\n print(f\"{context}: Switched to EMA weights\")\n try:\n yield None\n finally:\n if self.use_ema:\n self.model_ema.restore(self.parameters())\n if context is not None:\n print(f\"{context}: Restored training weights\")\n\n def on_train_batch_end(self, *args, **kwargs):\n if self.use_ema:\n self.model_ema(self)\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n self.log(\"aeloss\", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n\n self.log(\"discloss\", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return discloss\n\n def validation_step(self, batch, batch_idx):\n log_dict = self._validation_step(batch, batch_idx)\n with self.ema_scope():\n log_dict_ema = self._validation_step(batch, batch_idx, postfix=\"_ema\")\n return log_dict\n\n def _validation_step(self, batch, batch_idx, postfix=\"\"):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\"+postfix)\n\n self.log(f\"val{postfix}/rec_loss\", log_dict_ae[f\"val{postfix}/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def 
configure_optimizers(self):\n lr = self.learning_rate\n ae_params_list = list(self.encoder.parameters()) + list(self.decoder.parameters()) + list(\n self.quant_conv.parameters()) + list(self.post_quant_conv.parameters())\n if self.learn_logvar:\n print(f\"{self.__class__.__name__}: Learning logvar\")\n ae_params_list.append(self.loss.logvar)\n opt_ae = torch.optim.Adam(ae_params_list,\n lr=lr, betas=(0.5, 0.9))\n opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),\n lr=lr, betas=(0.5, 0.9))\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, log_ema=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.*(x-x.min())/(x.max()-x.min()) - 1.\n return x" }, { "identifier": "make_beta_schedule", "path": "libcom/painterly_image_harmonization/source/PHDiffusion/ldm/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "extract_into_tensor", "path": "libcom/painterly_image_harmonization/source/PHDiffusion/ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "noise_like", "path": "libcom/painterly_image_harmonization/source/PHDiffusion/ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DDIMSampler", "path": "libcom/painterly_image_harmonization/source/PHDiffusion/ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) 
== torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps, verbose=verbose)\n\n # alphas_cumprod = self.model.alphas_cumprod\n # assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n # self.register_buffer('betas', to_torch(self.model.betas))\n # self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n # self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n betas = torch.linspace(0.00085, 0.012, 1000, dtype=torch.float32).cpu()\n alphas = 1.0 - betas\n alphas_cumprod = torch.cumprod(alphas, dim=0)\n alphas_cumprod_prev = torch.from_numpy(np.append(1., alphas_cumprod[:-1]))\n\n self.register_buffer('betas', to_torch(betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', alphas_cumprod_prev.to(torch.float32).to(self.model.device)\n)\n\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta, verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n\n print(ddim_sigmas)\n print(ddim_alphas)\n\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n features_adapter=None,\n append_to_context=None,\n cond_tau=0.4,\n style_cond_tau=1.0,\n # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n features_adapter=features_adapter,\n append_to_context=append_to_context,\n cond_tau=cond_tau,\n style_cond_tau=style_cond_tau,\n\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, features_adapter=None,\n append_to_context=None, cond_tau=0.4, style_cond_tau=1.0,):\n device = self.model.betas.device\n b = shape[0]\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n\n time_range = reversed(range(0, timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n def get_timesteps(num_inference_steps, strength, tt):\n # get the original timestep using init_timestep\n print(time_range)\n\n init_timestep = min(int(num_inference_steps * strength), num_inference_steps)\n t_start = max(num_inference_steps - init_timestep, 0)\n print(t_start)\n\n tt = tt[t_start-1:]\n return tt, num_inference_steps - t_start\n time_range, num_inference_steps = get_timesteps(100, 0.3, time_range)\n\n print(time_range)\n\n\n if x_T is None:\n # img = torch.randn(shape, device=device)\n\n ts = torch.full((b,),time_range[0], device=device, dtype=torch.long)\n img = self.model.q_sample(x0, ts)\n\n else:\n img = x_T\n\n\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n\n\n iterator = tqdm(time_range[1:], desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n \n img = img_orig * (1.-mask) + mask * img\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n 
quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n features_adapter=None if index < int(\n (1 - cond_tau) * total_steps) else features_adapter,\n append_to_context=None if index < int(\n (1 - style_cond_tau) * total_steps) else append_to_context,\n )\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, features_adapter=None,\n append_to_context=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n if append_to_context is not None:\n model_output = self.model.apply_model(x, t, torch.cat([c, append_to_context], dim=1),\n features_adapter=features_adapter)\n else:\n model_output = self.model.apply_model(x, t, c, features_adapter=features_adapter)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [torch.cat([\n unconditional_conditioning[k][i],\n c[k][i]]) for i in range(len(c[k]))]\n else:\n c_in[k] = torch.cat([\n unconditional_conditioning[k],\n c[k]])\n elif isinstance(c, list):\n c_in = list()\n assert isinstance(unconditional_conditioning, list)\n for i in range(len(c)):\n c_in.append(torch.cat([unconditional_conditioning[i], c[i]]))\n else:\n if append_to_context is not None:\n pad_len = append_to_context.size(1)\n new_unconditional_conditioning = torch.cat(\n [unconditional_conditioning, unconditional_conditioning[:, -pad_len:, :]], dim=1)\n new_c = torch.cat([c, append_to_context], dim=1)\n c_in = torch.cat([new_unconditional_conditioning, new_c])\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in, features_adapter=features_adapter).chunk(2)\n model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)\n\n if self.model.parameterization == \"v\":\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\", 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = 
torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index], device=device)\n\n # current prediction for x_0\n if self.model.parameterization != \"v\":\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n else:\n pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t ** 2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n return x_dec" } ]
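The `make_beta_schedule` and `extract_into_tensor` snippets above are the pieces the DDPM class in this row combines into the closed-form forward process: with alpha_bar_t the cumulative product of (1 - beta_s), a noisy latent is sampled as x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps, which is exactly what `q_sample` computes from its registered `sqrt_alphas_cumprod` buffers. A small self-contained sketch under the same linear-schedule defaults (0.00085 to 0.012 over 1000 steps); it is an illustration, not code from the repo:

import torch

def linear_alphas_cumprod(n_timestep=1000, linear_start=0.00085, linear_end=0.012):
    # Same "linear" rule as make_beta_schedule: square a linspace between the sqrt endpoints.
    betas = torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2
    return torch.cumprod(1.0 - betas, dim=0)  # alpha_bar_t for t = 0 .. n_timestep - 1

def q_sample_closed_form(x0, t, alphas_cumprod):
    # Gather alpha_bar_t per batch element and broadcast over the remaining dims,
    # the same role extract_into_tensor plays in the snippets above.
    a_bar = alphas_cumprod.to(x0.dtype)[t].reshape(-1, *([1] * (x0.dim() - 1)))
    noise = torch.randn_like(x0)
    return a_bar.sqrt() * x0 + (1.0 - a_bar).sqrt() * noise

# Example: noise a batch of 4 latents at random timesteps.
x_t = q_sample_closed_form(torch.randn(4, 4, 64, 64), torch.randint(0, 1000, (4,)), linear_alphas_cumprod())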
import torch import torch.nn as nn import numpy as np import pytorch_lightning as pl import itertools from torch.optim.lr_scheduler import LambdaLR from einops import rearrange, repeat from contextlib import contextmanager, nullcontext from functools import partial from tqdm import tqdm from torchvision.utils import make_grid from pytorch_lightning.utilities.distributed import rank_zero_only from omegaconf import ListConfig from libcom.painterly_image_harmonization.source.PHDiffusion.ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config from libcom.painterly_image_harmonization.source.PHDiffusion.ldm.modules.ema import LitEma from libcom.painterly_image_harmonization.source.PHDiffusion.ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution from libcom.painterly_image_harmonization.source.PHDiffusion.ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL from libcom.painterly_image_harmonization.source.PHDiffusion.ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like from libcom.painterly_image_harmonization.source.PHDiffusion.ldm.models.diffusion.ddim import DDIMSampler
12,446
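The `LitEma` snippet in the context above, and the `model_ema` calls in the DDPM code that follows, maintain an exponential moving average of the trainable weights with a warm-up on the decay (min(decay, (1 + n) / (10 + n))). Below is a simplified dictionary-based sketch of one update step, offered only as an illustration; LitEma itself stores the shadow tensors as registered buffers keyed by parameter names with the dots stripped:

import torch

@torch.no_grad()
def ema_update(shadow: dict, model: torch.nn.Module, decay: float = 0.9999, num_updates: int = 0) -> None:
    # Warm-up: the effective decay grows toward `decay` as updates accumulate.
    eff_decay = min(decay, (1 + num_updates) / (10 + num_updates))
    one_minus = 1.0 - eff_decay
    for name, param in model.named_parameters():
        if param.requires_grad:
            # Move the shadow copy a small step toward the live parameter.
            shadow[name].sub_(one_minus * (shadow[name] - param))

# Usage: shadow = {n: p.detach().clone() for n, p in model.named_parameters() if p.requires_grad},
# then call ema_update(shadow, model, num_updates=step) after each optimizer step,
# analogous to the on_train_batch_end hook in the DDPM code below.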
if batch_size is not None: b = batch_size if batch_size is not None else shape[0] shape = [batch_size] + list(shape) else: b = batch_size = shape[0] if x_T is None: img = torch.randn(shape, device=self.device) else: img = x_T intermediates = [] if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation', total=timesteps) if verbose else reversed( range(0, timesteps)) if type(temperature) == float: temperature = [temperature] * timesteps for i in iterator: ts = torch.full((b,), i, device=self.device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img, x0_partial = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, return_x0=True, temperature=temperature[i], noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if mask is not None: assert x0 is not None img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) if callback: callback(i) if img_callback: img_callback(img, i) return img, intermediates @torch.no_grad() def p_sample_loop(self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( range(0, timesteps)) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. 
- mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None, **kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim:
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=512, channels=3, log_every_t=100, clip_denoised=True, linear_start=0.00085, linear_end=0.012, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., make_it_fit=False, ucg_training=None, reset_ema=False, reset_num_ema_updates=False, ): super().__init__() assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"' self.parameterization = parameterization # print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) # count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if reset_ema: assert exists(ckpt_path) if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) if reset_ema: assert self.use_ema print(f"Resetting ema to pure model weights. 
This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=0.00085, linear_end=0.012, cosine_s=8e-3): linear_start = 0.00085 linear_end = 0.012 # if exists(given_betas): # betas = given_betas # else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) elif self.parameterization == "v": lvlb_weights = torch.ones_like(self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))) else: raise NotImplementedError("mu not supported") lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] if self.make_it_fit: n_params = len([name for name, _ in itertools.chain(self.named_parameters(), self.named_buffers())]) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys:\n {missing}") if len(unexpected) > 0: print(f"\nUnexpected Keys:\n {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. 
""" mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def predict_start_from_z_and_v(self, x_t, t, v): # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v ) def predict_eps_from_z_and_v(self, x_t, t, v): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) 
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_v(self, x, noise, t): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x.shape) * noise - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x.shape) * x ) def get_loss(self, pred, target, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == 'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction='none') else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start elif self.parameterization == "v": target = self.get_v(x_start, noise, t) else: raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported") loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = 'train' if self.training else 'val' loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f'{log_prefix}/loss': loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] # if len(x.shape) == 
3: # x = x[..., None] # x = rearrange(x, 'b h w c -> b c h w') # x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): loss, loss_dict = self.shared_step(batch) self.log_dict(loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log("global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False) if self.use_scheduler: lr = self.optimizers().param_groups[0]['lr'] self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, *args, **kwargs): self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs['timesteps'] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config == '__is_unconditional__': conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) reset_ema = kwargs.pop("reset_ema", False) reset_num_ema_updates = 
kwargs.pop("reset_num_ema_updates", False) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True if reset_ema: assert self.use_ema print( f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() def make_cond_schedule(self, ): self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() self.cond_ids[:self.num_timesteps_cond] = ids def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=0.00085, linear_end=0.012, cosine_s=8e-3): linear_start = 0.00085 linear_end = 0.012 super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != '__is_first_stage__' assert config != '__is_unconditional__' model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append(self.decode_first_stage(zd.to(self.device), force_not_quantize=force_no_decoder_quantization)) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif 
isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented") return self.scale_factor * z def get_learned_conditioning(self, c): if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, device): weighting = self.delta_border(h, w) weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip(L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"]) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf)) fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) elif df > 1 and uf == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) 
fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df)) fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None): x = super().get_input(batch, k) if bs is not None: x = x[:bs] x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() if self.model.conditioning_key is not None: if cond_key is None: cond_key = self.cond_stage_key if cond_key != self.first_stage_key: if cond_key in ['caption', 'coordinates_bbox', "txt"]: xc = batch[cond_key] elif cond_key in ['class_label', 'cls']: xc = batch else: xc = super().get_input(batch, cond_key).to(self.device) else: xc = x if not self.cond_stage_trainable or force_c_encode: if isinstance(xc, dict) or isinstance(xc, list): # import pudb; pudb.set_trace() c = self.get_learned_conditioning(xc) else: c = self.get_learned_conditioning(xc.to(self.device)) else: c = xc if bs is not None: c = c[:bs] if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) ckey = __conditioning_keys__[self.model.conditioning_key] c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} else: c = None xc = None if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) c = {'pos_x': pos_x, 'pos_y': pos_y} out = [z, c] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_original_cond: out.append(xc) return out def decode_first_stage_training(self, z, predict_cids=False, force_not_quantize=False): # print('decoding...') # # def print_message(grad): # print('backward decoding') # # z.register_hook(print_message) if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z return self.first_stage_model.decode(z) @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z return self.first_stage_model.decode(z) @torch.no_grad() def encode_first_stage(self, x): return self.first_stage_model.encode(x) def shared_step(self, batch, **kwargs): x, c = self.get_input(batch, self.first_stage_key) loss = self(x, c, **kwargs) return loss def get_time_with_schedule(self, scheduler, bs): if scheduler == 'linear': t = torch.randint(0, self.num_timesteps, (bs,), device=self.device).long() elif scheduler == 'cosine': t = torch.rand((bs, ), device=self.device) t = torch.cos(torch.pi / 2. 
* t) * self.num_timesteps t = t.long() elif scheduler == 'cubic': t = torch.rand((bs,), device=self.device) t = (1 - t ** 3) * self.num_timesteps t = t.long() else: raise NotImplementedError t = torch.clamp(t, min=0, max=self.num_timesteps-1) return t def forward(self, x,mask, c, *args, **kwargs): if 't' not in kwargs: t = torch.randint(0, self.num_timesteps, (x.shape[0], ), device=self.device).long() else: t = kwargs.pop('t') return self.p_losses(x,mask, c, t, *args, **kwargs) def apply_model(self, x_noisy, mask,t, cond, return_ids=False, **kwargs): if isinstance(cond, dict): # hybrid case, cond is expected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' cond = {key: cond} x_recon = self.model(x_noisy, mask,t, **cond, **kwargs) if isinstance(x_recon, tuple) and not return_ids: return x_recon[0] else: return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. """ batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0) return mean_flat(kl_prior) / np.log(2.0) def p_losses(self, x_start, mask,cond, t, noise=None, **kwargs): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_output = self.apply_model(x_noisy,mask, t, cond, **kwargs) loss_dict = {} prefix = 'train' if self.training else 'val' # ops if self.parameterization == "x0": target = x_start elif self.parameterization == "eps": target = noise elif self.parameterization == "v": target = self.get_v(x_start, noise, t) else: raise NotImplementedError() loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()}) logvar_t = self.logvar[t].to(self.device) loss = loss_simple / torch.exp(logvar_t) + logvar_t # loss = loss_simple / torch.exp(self.logvar) + self.logvar if self.learn_logvar: loss_dict.update({f'{prefix}/loss_gamma': loss.mean()}) loss_dict.update({'logvar': self.logvar.data.mean()}) loss = self.l_simple_weight * loss.mean() loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() loss_dict.update({f'{prefix}/loss_vlb': loss_vlb}) loss += (self.original_elbo_weight * loss_vlb) loss_dict.update({f'{prefix}/loss': loss}) pred_x0 = self.predict_start_from_noise(x_t=x_noisy, t=t, noise=model_output) return loss, loss_dict,pred_x0 def p_losses_origin(self, x_start, cond, t, noise=None, **kwargs): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_output = self.apply_model(x_noisy, t, cond, **kwargs) loss_dict = {} prefix = 'train' if self.training else 'val' if self.parameterization == "x0": target = x_start elif self.parameterization == "eps": target = noise elif 
self.parameterization == "v": target = self.get_v(x_start, noise, t) else: raise NotImplementedError() loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()}) logvar_t = self.logvar[t].to(self.device) loss = loss_simple / torch.exp(logvar_t) + logvar_t # loss = loss_simple / torch.exp(self.logvar) + self.logvar if self.learn_logvar: loss_dict.update({f'{prefix}/loss_gamma': loss.mean()}) loss_dict.update({'logvar': self.logvar.data.mean()}) loss = self.l_simple_weight * loss.mean() loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() loss_dict.update({f'{prefix}/loss_vlb': loss_vlb}) loss += (self.original_elbo_weight * loss_vlb) loss_dict.update({f'{prefix}/loss': loss}) return loss, loss_dict def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, return_x0=False, score_corrector=None, corrector_kwargs=None): t_in = t model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) if score_corrector is not None: assert self.parameterization == "eps" model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs) if return_codebook_ids: model_out, logits = model_out if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out else: raise NotImplementedError() if clip_denoised: x_recon.clamp_(-1., 1.) if quantize_denoised: x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) if return_codebook_ids: return model_mean, posterior_variance, posterior_log_variance, logits elif return_x0: return model_mean, posterior_variance, posterior_log_variance, x_recon else: return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False, return_codebook_ids=False, quantize_denoised=False, return_x0=False, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None): b, *_, device = *x.shape, x.device outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised, return_codebook_ids=return_codebook_ids, quantize_denoised=quantize_denoised, return_x0=return_x0, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if return_codebook_ids: raise DeprecationWarning("Support dropped.") model_mean, _, model_log_variance, logits = outputs elif return_x0: model_mean, _, model_log_variance, x0 = outputs else: model_mean, _, model_log_variance = outputs noise = noise_like(x.shape, device, repeat_noise) * temperature if noise_dropout > 0.: noise = torch.nn.functional.dropout(noise, p=noise_dropout) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) if return_codebook_ids: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1) if return_x0: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0 else: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False, img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, batch_size=None, 
x_T=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t timesteps = self.num_timesteps if batch_size is not None: b = batch_size if batch_size is not None else shape[0] shape = [batch_size] + list(shape) else: b = batch_size = shape[0] if x_T is None: img = torch.randn(shape, device=self.device) else: img = x_T intermediates = [] if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation', total=timesteps) if verbose else reversed( range(0, timesteps)) if type(temperature) == float: temperature = [temperature] * timesteps for i in iterator: ts = torch.full((b,), i, device=self.device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img, x0_partial = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, return_x0=True, temperature=temperature[i], noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if mask is not None: assert x0 is not None img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) if callback: callback(i) if img_callback: img_callback(img, i) return img, intermediates @torch.no_grad() def p_sample_loop(self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( range(0, timesteps)) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. 
- mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None, **kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim:
ddim_sampler = DDIMSampler(self)
16
2023-10-19 05:08:12+00:00
16k
e4s2023/E4S2023
optimization.py
[ { "identifier": "CelebAHQDataset", "path": "datasets/dataset.py", "snippet": "class CelebAHQDataset(Dataset):\n \"\"\"\n CelebA-HQ数据集,具体数据来自于 https://github.com/ZPdesu/SEAN\n \"\"\"\n def __init__(self, dataset_root, mode=\"test\",\n img_transform=TO_TENSOR, label_transform=TO_TENSOR,\n load_vis_img=False, fraction=1.0,\n flip_p=-1, # negative means not flipping\n specific_ids: Union[list, tuple] = None,\n paired: bool = False,\n shuffle: bool = False,\n ):\n assert mode in (\"train\", \"test\", \"all\"), \"CelebAHQDataset mode type unsupported!\"\n self.mode = mode\n if mode in (\"all\",):\n self.roots = [osp.join(dataset_root, \"train\"), osp.join(dataset_root, \"test\")]\n else:\n self.roots = [osp.join(dataset_root, self.mode)]\n self.img_transform = img_transform\n self.label_transform = label_transform\n self.load_vis_img = load_vis_img\n self.fraction = fraction\n self.flip_p = flip_p\n self.paired = paired\n\n self.imgs = []\n self.labels = []\n self.labels_vis = []\n for root in self.roots:\n imgs = sorted(make_dataset(osp.join(root, \"images\")))\n imgs = imgs[:int(len(imgs)*self.fraction)]\n\n labels = sorted(make_dataset(osp.join(root, \"labels\")))\n labels = labels[:int(len(labels)*self.fraction)]\n\n labels_vis = sorted(make_dataset(osp.join(root, \"vis\"))) if self.load_vis_img else None\n labels_vis = labels_vis[:int(len(labels_vis)*self.fraction)] if self.load_vis_img else []\n\n self.imgs.extend(imgs)\n self.labels.extend(labels)\n self.labels_vis.extend(labels_vis)\n\n self.imgs, self.labels, self.labels_vis = self._filter_specific_ids(specific_ids)\n\n if self.load_vis_img:\n assert len(self.imgs) == len(self.labels) == len(self.labels_vis)\n else:\n assert len(self.imgs) == len(self.labels)\n\n print(f\"[CelebAHQDataset] files loaded. mode={self.mode}, #imgs={len(self.imgs)}, \"\n f\"#labels={len(self.labels)}, #vis={len(self.labels_vis)}\")\n\n # # 优化 600 个iteration 的style code保存路径\n # self.optim_codes_dir = \"/apdcephfs/share_1290939/zhianliu/py_projects/pytorch-DDP-demo/work_dirs/v0_8_stage2_entypeSEAN/optim_Results\"\n \n # image pairs indices\n self.indices = np.arange(len(self.imgs))\n\n # TODO: shuffle the indices\n if shuffle:\n np.random.shuffle(self.indices)\n\n self.pair_indices = self.indices.reshape(-1, 2)\n\n def __len__(self):\n if not self.paired:\n return len(self.indices)\n else:\n return len(self.pair_indices)\n\n def _filter_specific_ids(self, specific_ids: tuple):\n \"\"\" filter the images according to the specific_ids\n \"\"\"\n if specific_ids is None:\n return self.imgs, self.labels, self.labels_vis\n elif self.fraction < 1.0:\n raise ValueError(\"[CelebAHQDataset] specific_ids and fraction cannot be set simultaneously!\")\n\n # parse the tuple into two lists, e.g. 
((\"train\",\"12\"), (\"test\",\"45\")) -> (\"train\",\"train\") and (\"12\",\"45\")\n spec_modes, spec_ids = [], []\n id_order_dict = {}\n for idx, spec_id in enumerate(specific_ids):\n one_mode, one_id = spec_id[0], spec_id[1]\n spec_modes.append(one_mode)\n spec_ids.append(one_id)\n id_order_dict[one_id] = {\n \"mode\": one_mode, \"order\": idx,\n }\n\n # filter and re-order\n ret_imgs = [\"\"] * len(specific_ids)\n ret_labels = [\"\"] * len(specific_ids)\n ret_labels_vis = [\"\"] * len(specific_ids)\n found_cnt = 0\n for k in range(len(spec_ids)): # target specific ids\n one_spec_mode = spec_modes[k]\n one_spec_id = spec_ids[k]\n for idx in range(len(self.imgs)): # full dataset\n one_img = self.imgs[idx]\n one_label = self.labels[idx]\n one_label_vis = self.labels_vis[idx] if self.load_vis_img else None\n if one_spec_mode in one_img and one_spec_id == osp.basename(one_img): # found one\n found_cnt += 1\n one_spec_order = id_order_dict[one_spec_id][\"order\"]\n ret_imgs[one_spec_order] = one_img\n ret_labels[one_spec_order] = one_label\n ret_labels_vis[one_spec_order] = one_label_vis\n break\n\n if found_cnt < len(specific_ids):\n print(f\"[[Warning]][CelebAHQDataset] not enough images found (={found_cnt}) for \"\n f\"specific ids (={len(specific_ids)})!\")\n\n ret_imgs = list(filter(None, ret_imgs))\n ret_labels = list(filter(None, ret_labels))\n ret_labels_vis = list(filter(None, ret_labels_vis))\n return ret_imgs, ret_labels, ret_labels_vis\n\n def load_single_image(self, index):\n \"\"\"把一张图片的 原图, seg mask, 以及mask对应可视化的图都加载进来\n Args:\n index (int): 图片的索引\n Return:\n img: RGB图\n label: seg mask\n label_vis: seg mask的可视化图\n \"\"\"\n img_path = self.imgs[index]\n img = Image.open(img_path).convert('RGB')\n if self.img_transform is not None:\n img = self.img_transform(img)\n\n label = self.labels[index]\n # label = osp.join(\"/apdcephfs/share_1290939/zhianliu/py_projects/our_editing/ui_results\",\"%s_mask.png\"%osp.basename(label)[:-4])\n label = Image.open(label).convert('L')\n if self.label_transform is not None:\n label = self.label_transform(label)\n\n if self.load_vis_img:\n label_vis = self.labels_vis[index]\n label_vis = Image.open(label_vis).convert('RGB')\n label_vis = TO_TENSOR(label_vis)\n else:\n label_vis = -1 # unified interface\n return img, label, label_vis, img_path\n\n def _output_item(self, idx):\n if not self.paired:\n index = self.indices[idx]\n img, label, label_vis, img_path = self.load_single_image(index)\n if self.flip_p > 0:\n if random.random() < self.flip_p:\n img = TF.hflip(img)\n label = TF.hflip(label)\n return img, label, label_vis, img_path\n else:\n index1 = self.indices[idx * 2]\n index2 = self.indices[idx * 2 + 1]\n img1, label1, label_vis1, img_path1 = self.load_single_image(index1)\n img2, label2, label_vis2, img_path2 = self.load_single_image(index2)\n if self.flip_p > 0:\n if random.random() < self.flip_p:\n img1 = TF.hflip(img1)\n label1 = TF.hflip(label1)\n if random.random() < self.flip_p:\n img2 = TF.hflip(img2)\n label2 = TF.hflip(label2)\n return {\n \"bag1\": (img1, label1, label_vis1, img_path1),\n \"bag2\": (img2, label2, label_vis2, img_path2)\n }\n\n def __getitem__(self, idx):\n return self._output_item(idx)\n \n # # 1阶段重建的图片\n # img_name = osp.basename(self.imgs[index])[:-4]\n # recon_img = Image.open(osp.join(self.optim_codes_dir,img_name,\"%s_recon.png\"%img_name)).convert('RGB')\n # if self.img_transform is not None:\n # recon_img = self.img_transform(recon_img)\n \n # # 优化后的code\n # optim_code_path = 
osp.join(self.optim_codes_dir,img_name,\"%s_0600.npy\"%img_name)\n # assert osp.exists(optim_code_path), \"%s 文件不存在!\"%optim_code_path\n # optimed_style_code = np.load(optim_code_path)[0]\n \n # return img, recon_img, optimed_style_code, label, label_vis\n \n # pair_indices = self.pair_indices[idx, :]\n\n # img1, label1, label_vis1 = self.load_single_image(pair_indices[0])\n # img2, label2, label_vis2 = self.load_single_image(pair_indices[1])\n\n # return (img1, img2), (label1, label2), (label_vis1, label_vis2)" }, { "identifier": "get_transforms", "path": "datasets/dataset.py", "snippet": "def get_transforms(normalize=True, toTensor=True):\n transform_list = []\n if toTensor:\n transform_list += [transforms.ToTensor()]\n\n if normalize:\n transform_list += [transforms.Normalize((0.5, 0.5, 0.5),\n (0.5, 0.5, 0.5))]\n return transforms.Compose(transform_list)" }, { "identifier": "TO_TENSOR", "path": "datasets/dataset.py", "snippet": "TO_TENSOR = transforms.ToTensor()" }, { "identifier": "NORMALIZE", "path": "datasets/dataset.py", "snippet": "NORMALIZE = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))" }, { "identifier": "MASK_CONVERT_TF", "path": "datasets/dataset.py", "snippet": "MASK_CONVERT_TF = transforms.Lambda(\n lambda celebAHQ_mask: __celebAHQ_masks_to_faceParser_mask(celebAHQ_mask))" }, { "identifier": "MASK_CONVERT_TF_DETAILED", "path": "datasets/dataset.py", "snippet": "MASK_CONVERT_TF_DETAILED = transforms.Lambda(\n lambda celebAHQ_mask: __celebAHQ_masks_to_faceParser_mask_detailed(celebAHQ_mask))" }, { "identifier": "FFHQ_MASK_CONVERT_TF_DETAILED", "path": "datasets/dataset.py", "snippet": "FFHQ_MASK_CONVERT_TF_DETAILED = transforms.Lambda(\n lambda mask: __ffhq_masks_to_faceParser_mask_detailed(mask))" }, { "identifier": "Net3", "path": "models/networks.py", "snippet": "class Net3(nn.Module):\n \"\"\" FSEncoder + styleGAN2 \"\"\"\n\n def __init__(self,opts,):\n super(Net3, self).__init__()\n self.opts=opts\n assert self.opts.fsencoder_type in [\"psp\",\"sean\"]\n if self.opts.fsencoder_type==\"psp\":\n self.encoder = FSEncoder_PSP(mode='ir_se', opts=self.opts)\n dim_s_code = 256 + 512 + 512\n else:\n self.encoder = FSEncoder_SEAN(input_nc=3, output_nc=512,in_size = 256)\n dim_s_code = 512\n \n self.split_layer_idx = 5\n self.remaining_layer_idx = self.opts.remaining_layer_idx\n \n # 区分component 的 W+ space 的 MLPs\n self.MLPs = nn.ModuleList()\n for i in range(self.opts.num_seg_cls):\n self.MLPs.append(\n LocalMLP(\n dim_component=dim_s_code,\n dim_style=512,\n num_w_layers= self.remaining_layer_idx if self.remaining_layer_idx != 17 else 18\n )\n )\n \n self.G = Generator(size=self.opts.out_size, style_dim=512, n_mlp=8, split_layer_idx = self.split_layer_idx, remaining_layer_idx = self.remaining_layer_idx)\n\n # styleGAN的参数是否更新\n if not self.opts.train_G:\n for param in self.G.parameters():\n param.requires_grad = False\n # 注意,styleGAN的8层FC是永远不更新的\n else:\n for param in self.G.style.parameters():\n param.requires_grad = False\n \n # styleGAN的倒数几层不更新 (包括convs 和 ToRGBs)\n if self.remaining_layer_idx != 17:\n for param in self.G.convs[-(17-self.remaining_layer_idx):].parameters():\n param.requires_grad = False\n for param in self.G.to_rgbs[-(17-self.remaining_layer_idx)//2 - 1:].parameters():\n param.requires_grad = False\n \n \n def forward(self, img,mask, resize=False, randomize_noise=True,return_latents=False):\n \"\"\"输入一张RGB图和对应的mask,\n (1) encoder 得到对应的F/S空间的特征,\n (2) 再送到styleGAN得到一张输出的图片\n\n Args:\n img (Tensor): 一对RGB图, each with shape [bs,3,1024,1024]\n mask 
([type]): 一对RGB图对应的mask图, each with shape [bs,#seg_cls,1024,1024]\n resize (bool, optional): G生成的图片是否 resize. Defaults to True.\n randomize_noise (bool, optional): 是否加入随机噪声. Defaults to True.\n return_latents (bool, optional): 是否返回style codes. Defaults to False.\n\n Returns:\n [type]: [description]\n \"\"\"\n if self.opts.fsencoder_type==\"psp\":\n codes_vector, structure_feats = self.encoder(F.interpolate(img,(256,256),mode='bilinear'), mask) # [bs,#seg_cls, D], [bs,C,32,32]\n else:\n codes_vector, structure_feats = self.encoder(F.interpolate(img,(256,256),mode='bilinear'), mask) # [bs,#seg_cls, D], [bs,C,32,32]\n codes=[]\n bs, num_comp = codes_vector.size(0), codes_vector.size(1)\n for i in range(num_comp):\n codes.append(self.MLPs[i](codes_vector[:,i,:])) \n codes=torch.stack(codes,dim=1) # [bs, #seg_cls, 13, 512]\n \n \n # # 剩下的几层不用分component\n # remaining_codes=[]\n # for i in range(len(self.remain_MLPs)):\n # remaining_codes.append(self.remain_MLPs[i](codes_vector.view(bs, -1)))\n # remaining_codes = torch.stack(remaining_codes,dim=1) # [bs,5,512]\n\n # normalize with respect to the center of an average face\n if self.opts.start_from_latent_avg:\n if self.opts.learn_in_w:\n # 为了保持接口统一,将后3层的 style code 也扩展出一个 #seg_cls 维度\n codes = codes + self.latent_avg[:self.remaining_layer_idx, :].repeat(codes.shape[0],codes.shape[1],1)\n remaining_codes = self.latent_avg[self.remaining_layer_idx:, :].repeat(bs, num_comp, 1) \n codes = torch.cat([codes, remaining_codes],dim=2)\n else:\n if self.remaining_layer_idx != 17:\n codes = codes + self.latent_avg[:self.remaining_layer_idx, :].repeat(codes.shape[0],codes.shape[1],1, 1)\n remaining_codes = self.latent_avg[self.remaining_layer_idx:, :].repeat(bs, num_comp, 1, 1) \n codes = torch.cat([codes, remaining_codes],dim=2)\n else:\n codes = codes + self.latent_avg.repeat(codes.shape[0],codes.shape[1],1, 1)\n \n # 1. 完全使用 style code i.e., G(w)\n images1, result_latent, structure_feats_GT = self.G([codes], structure_feats, mask, input_is_latent=True,\n randomize_noise=randomize_noise,return_latents=return_latents,\n use_structure_code=False)\n \n \n # # 2. 使用 style code 和 strcture code i.e., G(w,F)\n # images2, _ , _ = self.G([codes], structure_feats, mask, input_is_latent=True,\n # randomize_noise=randomize_noise,return_latents=return_latents,\n # use_structure_code=True)\n \n if return_latents:\n return images1, structure_feats_GT, result_latent\n else:\n return images1, structure_feats_GT\n\n def get_style(self, img, mask):\n \"\"\"输入一张RGB图和对应的mask, 得到各个component 对应的style codes\n \n Args:\n img (Tensor): RGB图, each with shape [bs,3,1024,1024]\n mask (Tensor): RGB图对应的mask图, each with shape [bs,#seg_cls,1024,1024]\n \n Returns:\n structure_feats(Tensor): 图片的structure code, with shape [bs,512,32,32], 注意,这里其实是相对于StyleGAN第层输出的残差\n all_codes(Tensor): 各个component 对应的style codes, with shape [bs,#comp,18,512]。\n !!! 
注意,前7层的各个compnent其实没有意义,只是为了统一接口让shape保持一致,用的时候只用第1个即可 !!!\n \"\"\"\n if self.opts.fsencoder_type==\"psp\":\n codes_vector, structure_feats = self.encoder(F.interpolate(img,(256,256),mode='bilinear'), mask) # [bs,#seg_cls, D], [bs,C,32,32]\n else:\n codes_vector, structure_feats = self.encoder(F.interpolate(img,(256,256),mode='bilinear'), mask) # [bs,#seg_cls, D], [bs,C,32,32]\n codes=[]\n bs, num_comp = codes_vector.size(0), codes_vector.size(1)\n for i in range(num_comp):\n codes.append(self.MLPs[i](codes_vector[:,i,:])) \n codes=torch.stack(codes,dim=1) # [bs, #seg_cls, 11,512]\n\n # # 剩下的几层不用分component\n # remaining_codes=[]\n # for i in range(len(self.remain_MLPs)):\n # remaining_codes.append(self.remain_MLPs[i](codes_vector.view(bs, -1)))\n # remaining_codes = torch.stack(remaining_codes,dim=1) # [bs,5,512]\n\n # normalize with respect to the center of an average face\n if self.opts.start_from_latent_avg:\n if self.opts.learn_in_w:\n # 为了保持接口统一,将后3层的 style code 也扩展出一个 #seg_cls 维度\n codes = codes + self.latent_avg[:self.remaining_layer_idx, :].repeat(codes.shape[0],codes.shape[1],1)\n remaining_codes = self.latent_avg[self.remaining_layer_idx:, :].repeat(bs, num_comp, 1) \n style_codes = torch.cat([codes, remaining_codes],dim=2)\n else:\n if self.remaining_layer_idx != 17:\n codes = codes + self.latent_avg[:self.remaining_layer_idx, :].repeat(codes.shape[0],codes.shape[1],1, 1)\n remaining_codes = self.latent_avg[self.remaining_layer_idx:, :].repeat(bs, num_comp, 1, 1) \n style_codes = torch.cat([codes, remaining_codes],dim=2)\n else:\n style_codes = codes + self.latent_avg.repeat(codes.shape[0],codes.shape[1],1, 1)\n \n return structure_feats, style_codes\n\n def get_style_vectors(self, img, mask):\n \"\"\"输入一张RGB图和对应的mask, 得到各个component 对应的style vectors\n \n Args:\n img (Tensor): RGB图, each with shape [bs,3,1024,1024]\n mask (Tensor): RGB图对应的mask图, each with shape [bs,#seg_cls,1024,1024]\n \n Returns:\n style_vectors(Tensor): with shape [bs,#seg_cls,512]\n \"\"\"\n if self.opts.fsencoder_type==\"psp\":\n style_vectors, structure_feats = self.encoder(F.interpolate(img,(256,256),mode='bilinear'), mask) # [bs,#seg_cls, D], [bs,C,32,32]\n else:\n style_vectors, structure_feats = self.encoder(F.interpolate(img,(256,256),mode='bilinear'), mask) # [bs,#seg_cls, D], [bs,C,32,32]\n \n return style_vectors, structure_feats\n \n def cal_style_codes(self,style_vectors):\n \"\"\"根据每个compnent的 style vector转到styleGAN的style code\"\"\"\n \n codes=[]\n bs, num_comp = style_vectors.size(0), style_vectors.size(1)\n for i in range(num_comp):\n codes.append(self.MLPs[i](style_vectors[:,i,:])) \n codes=torch.stack(codes,dim=1) # [bs, #seg_cls, 11,512]\n\n # # 剩下的几层不用分component\n # remaining_codes=[]\n # for i in range(len(self.remain_MLPs)):\n # remaining_codes.append(self.remain_MLPs[i](style_vectors.view(bs, -1)))\n # remaining_codes = torch.stack(remaining_codes,dim=1) # [bs,5,512]\n\n # normalize with respect to the center of an average face\n if self.opts.start_from_latent_avg:\n if self.opts.learn_in_w:\n # 为了保持接口统一,将后3层的 style code 也扩展出一个 #seg_cls 维度\n codes = codes + self.latent_avg[:self.remaining_layer_idx, :].repeat(codes.shape[0],codes.shape[1],1)\n remaining_codes = self.latent_avg[self.remaining_layer_idx:, :].repeat(bs, num_comp, 1) \n style_codes = torch.cat([codes, remaining_codes],dim=2)\n else:\n if self.remaining_layer_idx != 17:\n codes = codes + self.latent_avg[:self.remaining_layer_idx, :].repeat(codes.shape[0],codes.shape[1],1, 1)\n remaining_codes = 
self.latent_avg[self.remaining_layer_idx:, :].repeat(bs, num_comp, 1, 1) \n style_codes = torch.cat([codes, remaining_codes],dim=2)\n else:\n style_codes = codes + self.latent_avg.repeat(codes.shape[0],codes.shape[1],1, 1)\n \n return style_codes\n\n def gen_img(self, struc_codes, style_codes, mask, randomize_noise=True, noise=None, return_latents=False):\n \"\"\"输入一张mask 和 对应各components的style codes,以及这张图片的structure code, 生成一张图片\n \n Args:\n style_codes (Tensor): 各个component 对应的style codes, with shape [bs,#comp,18,512]\n struc_codes (Tensor)\n mask (Tensor): mask图, with shape [bs,#seg_cls,1024,1024]\n \n randomize_noise (bool, optional): 是否加入随机噪声. Defaults to True.\n return_latents (bool, optional): 是否返回style codes. Defaults to False.\n\n Returns:\n [type]: [description]\n \"\"\"\n \n images, result_latent, structure_feats = self.G([style_codes], struc_codes, mask, input_is_latent=True,\n randomize_noise=randomize_noise,noise=noise,return_latents=return_latents,\n use_structure_code=False)\n\n if return_latents:\n return images, result_latent, structure_feats\n else:\n return images,-1, structure_feats" }, { "identifier": "OptimOptions", "path": "options/optim_options.py", "snippet": "class OptimOptions:\n\n\tdef __init__(self):\n\t\tself.parser = ArgumentParser()\n\t\tself.initialize()\n\n\tdef initialize(self):\n\t\tself.parser.add_argument('--exp_dir', type=str, default=\"/apdcephfs/share_1290939/zhianliu/running_results/our_editing/work_dirs/dummy\",help='Path to experiment output directory')\n\t\tself.parser.add_argument('--num_seg_cls', type=int, default=12,help='Segmentation mask class number')\n\t\tself.parser.add_argument('--begin_idx', type=int, default=408)\n\t\tself.parser.add_argument('--remaining_layer_idx', type=int, default=13, help='剩余的几层不用mask')\n \n # ================= 模型设置 相关 =====================\n\t\tself.parser.add_argument('--out_size', type=int, default=1024,help='output image size') \n\t\tself.parser.add_argument('--load_ema', default=False, type=bool, help='Whether to load the styleGAN EMA model')\n\t\tself.parser.add_argument('--n_styles', default=18, type=int, help='StyleGAN层数')\n\t\tself.parser.add_argument('--fsencoder_type', type=str, default=\"psp\", help='FS Encode网络类型') \n\t\tself.parser.add_argument('--checkpoint_path', default=\"/apdcephfs_cq2/share_1290939/branchwang/projects/pytorch-DDP-demo/iteration_300000.pt\", type=str, help='Path to model checkpoint')\n\t\tself.parser.add_argument('--train_G', default=False, type=bool, help='Whether to train the styleGAN model')\n \n # ================= 数据集 相关 =====================\n\t\tself.parser.add_argument('--dataset_root', default='/apdcephfs/share_1290939/zhianliu/datasets/CelebA-HQ', type=str, help='dataset root path')\n\t\tself.parser.add_argument('--ds_frac', default=1.0, type=float, help='dataset fraction')\n\t\tself.parser.add_argument('--test_batch_size', default=1, type=int, help='Batch size for testing and inference')\n\t\tself.parser.add_argument('--test_workers', default=4, type=int, help='Number of test/inference dataloader workers')\n\t\t\n\t\tself.parser.add_argument('--output_size', default=1024, type=int, help='Output size of generator')\n\t\tself.parser.add_argument('--device', default='cuda:0', type=str, help='Which GPU(s) to use')\n\n\t\tself.parser.add_argument('--start_from_latent_avg', action='store_true',default=True, help='Whether to add average latent vector to generate codes from encoder.')\n\t\tself.parser.add_argument('--learn_in_w', action='store_true', help='Whether to learn in 
w space instead of w+')\n # ================= 优化 相关 =========================\n\t\tself.parser.add_argument('--num_layers', default=18, type=int)\n\t\tself.parser.add_argument('--lr', default=1e-2, type=float)\n\t\tself.parser.add_argument('--opt_name', type=str, default='adam', help='Optimizer to use in projected gradient descent')\n\t\tself.parser.add_argument('--W_steps', type=int, default=150, help='Number of W space optimization steps')\n\t\tself.parser.add_argument('--finetune_steps', type=int, default=100, help='Number of net finetune steps')\n\t\tself.parser.add_argument('--finetune_learning_rate', type=float, default=1e-3)\n\t\tself.parser.add_argument('--save_intermediate', action='store_true', help='Whether to store and save intermediate images during optimization')\n\t\tself.parser.add_argument('--verbose', action='store_true', help='Whether to verbose during optimization') \n\t\tself.parser.add_argument('--save_interval', type=int, default=100, help='Latent checkpoint interval')\n\t\tself.parser.add_argument('--output_dir', type=str, default='/apdcephfs/share_1290939/zhianliu/running_results/our_editing/tmp', help='Optimizer output dir')\n \n # ================= Loss 相关 =====================\n\t\tself.parser.add_argument('--lpips_lambda', default=0.8, type=float, help='LPIPS loss multiplier factor')\n\t\tself.parser.add_argument('--id_lambda', default=0.1, type=float, help='ID loss multiplier factor')\n\t\tself.parser.add_argument('--id_loss_multiscale', default=True, type=bool, help='Whether to apply multi scale in ID loss') \n\t\tself.parser.add_argument('--face_parsing_lambda', default=0.1, type=float, help='Face parsing loss multiplier factor')\t\t\n\t\tself.parser.add_argument('--l2_lambda', default=1.0, type=float, help='L2 loss multiplier factor')\n\t\t# self.parser.add_argument('--ir_se50_path', default='/apdcephfs_cq2/share_1290939/branchwang/pretrained_models/pixel2style2pixel/model_ir_se50.pth', type=str, help='Path to ir_se50 model weights')\n\t\tself.parser.add_argument('--ir_se50_path',\n\t\t\t\t\t\t\t\t default='./pretrained/pixel2style2pixel/model_ir_se50.pth',\n\t\t\t\t\t\t\t\t type=str, help='Path to ir_se50 model weights')\n\t\tself.parser.add_argument('--face_parsing_model_path', default='/apdcephfs_cq2/share_1290939/branchwang/pretrained_models/CelebA-Mask-HQ-faceParser/model.pth', type=str, help='Path to face parsing model weights')\n\t\n\tdef parse(self):\n\t\topts = self.parser.parse_args()\n\t\treturn opts" }, { "identifier": "IDLoss", "path": "criteria/id_loss.py", "snippet": "class IDLoss(nn.Module):\n def __init__(self,opts):\n super(IDLoss, self).__init__()\n print('Loading ResNet ArcFace')\n self.opts = opts \n \n self.face_pool_1 = torch.nn.AdaptiveAvgPool2d((256, 256))\n self.facenet = Backbone(input_size=112, num_layers=50, drop_ratio=0.6, mode='ir_se')\n self.facenet.load_state_dict(torch.load(opts.ir_se50_path))\n self.face_pool_2 = torch.nn.AdaptiveAvgPool2d((112, 112))\n self.facenet.eval()\n \n self.set_requires_grad(False)\n \n def set_requires_grad(self, flag=True):\n for p in self.parameters():\n p.requires_grad = flag\n \n def extract_feats(self, x):\n x = self.face_pool_1(x) if x.shape[2]!=256 else x # (1) resize to 256 if needed\n x = x[:, :, 35:223, 32:220] # (2) Crop interesting region\n x = self.face_pool_2(x) # (3) resize to 112 to fit pre-trained model\n x_feats = self.facenet(x, multi_scale=self.opts.id_loss_multiscale)\n return x_feats\n\n def forward(self, y_hat, y):\n n_samples = y.shape[0]\n y_feats_ms = 
self.extract_feats(y) # Otherwise use the feature from there\n y_hat_feats_ms = self.extract_feats(y_hat)\n y_feats_ms = [y_f.detach() for y_f in y_feats_ms] # 各个层的特征\n \n loss_all = 0\n sim_improvement_all = 0\n # 不同尺度\n for y_hat_feats, y_feats in zip(y_hat_feats_ms, y_feats_ms):\n \n loss = 0\n sim_improvement = 0\n count = 0\n # 不同的sample\n for i in range(n_samples):\n sim_target = y_hat_feats[i].dot(y_feats[i])\n sim_views = y_feats[i].dot(y_feats[i])\n \n loss += 1 - sim_target # id loss\n sim_improvement += float(sim_target) - float(sim_views)\n count += 1\n \n loss_all += loss / count\n sim_improvement_all += sim_improvement / count\n \n return loss_all, sim_improvement_all, None" }, { "identifier": "LPIPS", "path": "criteria/lpips/lpips.py", "snippet": "class LPIPS(nn.Module):\n r\"\"\"Creates a criterion that measures\n Learned Perceptual Image Patch Similarity (LPIPS).\n Arguments:\n net_type (str): the network type to compare the features:\n 'alex' | 'squeeze' | 'vgg'. Default: 'alex'.\n version (str): the version of LPIPS. Default: 0.1.\n \"\"\"\n def __init__(self, net_type: str = 'alex', version: str = '0.1'):\n\n assert version in ['0.1'], 'v0.1 is only supported now'\n\n super(LPIPS, self).__init__()\n\n # pretrained network\n self.net = get_network(net_type)\n\n # linear layers\n self.lin = LinLayers(self.net.n_channels_list)\n self.lin.load_state_dict(get_state_dict(net_type, version))\n\n def forward(self, x: torch.Tensor, y: torch.Tensor):\n feat_x, feat_y = self.net(x), self.net(y)\n\n diff = [(fx - fy) ** 2 for fx, fy in zip(feat_x, feat_y)]\n res = [l(d).mean((2, 3), True) for d, l in zip(diff, self.lin)]\n\n return torch.sum(torch.cat(res, 0)) / x.shape[0]" }, { "identifier": "StyleLoss", "path": "criteria/style_loss.py", "snippet": "class StyleLoss(nn.Module):\n def __init__(self, VGG16_ACTIVATIONS_LIST=[21], normalize=False, distance=\"l2\", in_size=256):\n\n super(StyleLoss, self).__init__()\n\n self.vgg16_act = VGG16_Activations(VGG16_ACTIVATIONS_LIST)\n self.vgg16_act.eval()\n\n ## ===== 修改 =====\n self.in_size = in_size\n # self.upsample2d = nn.Upsample(scale_factor=256 / in_size, mode=\"bilinear\", align_corners=True)\n ## ================\n \n self.normalize = normalize\n self.distance = distance\n\n def normalize_img(self, x):\n \"\"\"\n 将x的范围变到 适配 VGG 输入范围 \n \n https://pytorch.org/vision/stable/models.html\n \n x: [bs,3,H,W] 假设范围是 [-1,1]\n \"\"\"\n x = (x + 1) / 2\n \n mean = torch.from_numpy(VGG_MEAN).view(1,3,1,1).to(x.device)\n std = torch.from_numpy(VGG_STD).view(1,3,1,1).to(x.device)\n \n x = (x - mean) / std\n \n return x\n \n def forward(self, x, x_hat, mask_x=None, mask_x_hat=None):\n # x = x.cuda()\n # x_hat = x_hat.cuda()\n # resize images to 256px resolution\n \n N, C, H, W = x.shape\n \n # x = self.upsample2d(x)\n # x_hat = self.upsample2d(x_hat)\n \n x = F.interpolate(x, size=(256,256), mode=\"bilinear\")\n x_hat = F.interpolate(x_hat, size=(256,256), mode=\"bilinear\")\n\n if self.normalize:\n x = self.normalize_img(x)\n x_hat = self.normalize_img(x_hat)\n \n loss = self.cal_style(self.vgg16_act, x, x_hat, mask_x=mask_x, mask_x_hat=mask_x_hat)\n\n return loss\n\n def cal_style(self, model, x, x_hat, mask_x=None, mask_x_hat=None):\n # Get features from the model for x and x_hat\n \n # with torch.no_grad():\n # act_x = self.get_features(model, x)\n # for layer in range(0, len(act_x)):\n # act_x[layer].detach_()\n \n # mask 图片\n if mask_x is not None:\n assert mask_x_hat is not None, \"mask_x_hat 必须存在!\"\n H, W = x.size(2), x.size(3)\n mask_x 
= F.interpolate(mask_x, size=(H,W),mode=\"bilinear\")\n x = x * mask_x\n \n mask_x_hat = F.interpolate(mask_x_hat, size=(H,W),mode=\"bilinear\")\n x_hat = x_hat * mask_x_hat\n\n act_x = self.get_features(model, x)\n act_x_hat = self.get_features(model, x_hat)\n\n loss = 0.0\n for layer in range(0, len(act_x)):\n # # mask features if present\n # if mask_x is not None:\n # feat_x = self.mask_features(act_x[layer], mask_x)\n # else:\n # feat_x = act_x[layer]\n \n # if mask_x_hat is not None:\n # feat_x_hat = self.mask_features(act_x_hat[layer], mask_x_hat)\n # else:\n # feat_x_hat = act_x_hat[layer]\n \n feat_x = act_x[layer]\n feat_x_hat = act_x_hat[layer]\n\n \"\"\" 可视化 feature maps\n import ipdb; ipdb.set_trace()\n fx = feat_x[0, ...].detach().cpu().numpy()\n fx = (fx - fx.min()) / (fx.max() - fx.min())\n fx = fx * 255.\n fxhat = feat_x_hat[0, ...].detach().cpu().numpy()\n fxhat = (fxhat - fxhat.min()) / (fxhat.max() - fxhat.min())\n fxhat = fxhat * 255\n from PIL import Image\n import numpy as np\n for idx, img in enumerate(fx):\n img = fx[idx, ...]\n img = img.astype(np.uint8)\n img = Image.fromarray(img)\n img.save('plot/feat_x/{}.png'.format(str(idx)))\n img = fxhat[idx, ...]\n img = img.astype(np.uint8)\n img = Image.fromarray(img)\n img.save('plot/feat_x_hat/{}.png'.format(str(idx)))\n import ipdb; ipdb.set_trace()\n \"\"\"\n\n # compute Gram matrix for x and x_hat\n G_x = self.gram_matrix(feat_x)\n G_x_hat = self.gram_matrix(feat_x_hat)\n\n # compute layer wise loss and aggregate\n loss += custom_loss(\n G_x, G_x_hat, mask=None, loss_type=self.distance, include_bkgd=True\n )\n\n loss = loss / len(act_x)\n\n return loss\n\n def get_features(self, model, x):\n\n return model(x)\n\n def mask_features(self, x, mask):\n\n mask = prepare_mask(x, mask)\n return x * mask\n\n def gram_matrix(self, x):\n \"\"\"\n :x is an activation tensor\n \"\"\"\n N, C, H, W = x.shape\n x = x.view(N * C, H * W)\n G = torch.mm(x, x.t())\n\n return G.div(N * H * W * C)" }, { "identifier": "FaceParsingLoss", "path": "criteria/face_parsing/face_parsing_loss.py", "snippet": "class FaceParsingLoss(nn.Module):\n def __init__(self,opts):\n super(FaceParsingLoss, self).__init__()\n print('Loading Face Parsing Net')\n \n self.opts = opts\n self.face_pool = torch.nn.AdaptiveAvgPool2d((512, 512))\n \n self.G = unet()\n self.G.load_state_dict(torch.load(opts.face_parsing_model_path))\n self.G.eval()\n \n self.set_requires_grad(False)\n \n def set_requires_grad(self, flag=True):\n for p in self.parameters():\n p.requires_grad = flag\n \n\n def inference(self, x):\n x = self.face_pool(x) if x.shape[2]!=512 else x # resize to 512 if needed\n labels_predict = self.G(x)\n \n labels_predict_plain = generate_label_plain(labels_predict,imsize=512) # np.array [N,H,W]\n labels_predict_color = generate_label(labels_predict,imsize=512) # torch.Tensor [N,3,H,W]\n \n return labels_predict_plain, labels_predict_color\n \n def extract_feats(self, x):\n x = self.face_pool(x) if x.shape[2]!=512 else x # resize to 512 if needed\n x_feats = self.G.extract_feats(x)\n return x_feats\n\n def forward(self, y_hat, y):\n n_samples = y.shape[0]\n y_feats_ms = self.extract_feats(y) # Otherwise use the feature from there\n y_hat_feats_ms = self.extract_feats(y_hat)\n y_feats_ms = [y_f.detach() for y_f in y_feats_ms] # 各个层的特征\n \n loss_all = 0\n sim_improvement_all = 0\n # 不同尺度\n for y_hat_feats, y_feats in zip(y_hat_feats_ms, y_feats_ms):\n loss = 0\n sim_improvement = 0\n count = 0\n # 不同的sample\n for i in range(n_samples):\n sim_target = 
y_hat_feats[i].dot(y_feats[i])\n sim_views = y_feats[i].dot(y_feats[i])\n \n loss += 1 - sim_target # id loss\n sim_improvement += float(sim_target) - float(sim_views)\n count += 1\n \n loss_all += loss / count\n sim_improvement_all += sim_improvement / count\n \n return loss_all, sim_improvement_all" }, { "identifier": "torch_utils", "path": "utils/torch_utils.py", "snippet": "def saveTensorToFile(tensor, save_path):\ndef interpolate(img, size):\ndef readImgAsTensor(img_path, gray=False, to_tensor=True, size=1024):\ndef featMap2im(var):\ndef tensor2im(var, is_zero_center: bool = True, ):\ndef im2tensor(var, add_c_dim: bool = False, norm: bool = True, std: bool = False):\ndef tensor2map(var,shown_mask_indices=None):\ndef vis_mask_in_color(mask):\ndef get_colors():\ndef vis_faces(log_hooks1):\ndef vis_faces_no_id(hooks_dict1, fig, gs, i):\ndef aggregate_loss_dict(agg_loss_dict):\ndef labelMap2OneHot(label, num_cls):\ndef remove_module_prefix(state_dict,prefix):\ndef requires_grad(model, flag=True):\ndef accumulate(model1, model2, decay=0.999):\n C, H, W = tensor.size()" }, { "identifier": "crop_faces", "path": "utils/alignment.py", "snippet": "def crop_faces(IMAGE_SIZE, files, scale, center_sigma=0.0, xy_sigma=0.0, use_fa=False):\n if use_fa:\n fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, flip_input=True)\n predictor = None\n detector = None\n else:\n import dlib\n fa = None\n predictor = dlib.shape_predictor(\n make_abs_path(\"../pretrained/E4S/shape_predictor_68_face_landmarks.dat\")\n )\n detector = dlib.get_frontal_face_detector()\n\n cs, xs, ys = [], [], []\n for _, path in tqdm(files):\n c, x, y = compute_transform(path, predictor, detector=detector,\n scale=scale, fa=fa)\n cs.append(c)\n xs.append(x)\n ys.append(y)\n\n cs = np.stack(cs)\n xs = np.stack(xs)\n ys = np.stack(ys)\n if center_sigma != 0:\n cs = gaussian_filter1d(cs, sigma=center_sigma, axis=0)\n\n if xy_sigma != 0:\n xs = gaussian_filter1d(xs, sigma=xy_sigma, axis=0)\n ys = gaussian_filter1d(ys, sigma=xy_sigma, axis=0)\n\n quads = np.stack([cs - xs - ys, cs - xs + ys, cs + xs + ys, cs + xs - ys], axis=1)\n quads = list(quads)\n\n crops, orig_images = crop_faces_by_quads(IMAGE_SIZE, files, quads)\n\n return crops, orig_images, quads" }, { "identifier": "calc_alignment_coefficients", "path": "utils/alignment.py", "snippet": "def calc_alignment_coefficients(pa, pb):\n matrix = []\n for p1, p2 in zip(pa, pb):\n matrix.append([p1[0], p1[1], 1, 0, 0, 0, -p2[0] * p1[0], -p2[0] * p1[1]])\n matrix.append([0, 0, 0, p1[0], p1[1], 1, -p2[1] * p1[0], -p2[1] * p1[1]])\n\n a = np.matrix(matrix, dtype=float)\n b = np.array(pb).reshape(8)\n\n res = np.dot(np.linalg.inv(a.T * a) * a.T, b)\n return np.array(res).reshape(8)" }, { "identifier": "dilation", "path": "utils/morphology.py", "snippet": "def dilation(\n tensor: torch.Tensor,\n kernel: torch.Tensor,\n structuring_element: Optional[torch.Tensor] = None,\n origin: Optional[List[int]] = None,\n border_type: str = 'geodesic',\n border_value: float = 0.0,\n max_val: float = 1e4,\n engine: str = 'unfold',\n) -> torch.Tensor:\n r\"\"\"Return the dilated image applying the same kernel in each channel.\n .. image:: _static/img/dilation.png\n The kernel must have 2 dimensions.\n Args:\n tensor: Image with shape :math:`(B, C, H, W)`.\n kernel: Positions of non-infinite elements of a flat structuring element. Non-zero values give\n the set of neighbors of the center over which the operation is applied. 
Its shape is :math:`(k_x, k_y)`.\n For full structural elements use torch.ones_like(structural_element).\n structuring_element: Structuring element used for the grayscale dilation. It may be a non-flat\n structuring element.\n origin: Origin of the structuring element. Default: ``None`` and uses the center of\n the structuring element as origin (rounding towards zero).\n border_type: It determines how the image borders are handled, where ``border_value`` is the value\n when ``border_type`` is equal to ``constant``. Default: ``geodesic`` which ignores the values that are\n outside the image when applying the operation.\n border_value: Value to fill past edges of input if ``border_type`` is ``constant``.\n max_val: The value of the infinite elements in the kernel.\n engine: convolution is faster and less memory hungry, and unfold is more stable numerically\n Returns:\n Dilated image with shape :math:`(B, C, H, W)`.\n .. note::\n See a working example `here <https://kornia-tutorials.readthedocs.io/en/latest/\n morphology_101.html>`__.\n Example:\n >>> tensor = torch.rand(1, 3, 5, 5)\n >>> kernel = torch.ones(3, 3)\n >>> dilated_img = dilation(tensor, kernel)\n \"\"\"\n\n if not isinstance(tensor, torch.Tensor):\n raise TypeError(f\"Input type is not a torch.Tensor. Got {type(tensor)}\")\n\n if len(tensor.shape) != 4:\n raise ValueError(f\"Input size must have 4 dimensions. Got {tensor.dim()}\")\n\n if not isinstance(kernel, torch.Tensor):\n raise TypeError(f\"Kernel type is not a torch.Tensor. Got {type(kernel)}\")\n\n if len(kernel.shape) != 2:\n raise ValueError(f\"Kernel size must have 2 dimensions. Got {kernel.dim()}\")\n\n # origin\n se_h, se_w = kernel.shape\n if origin is None:\n origin = [se_h // 2, se_w // 2]\n\n # pad\n pad_e: List[int] = [origin[1], se_w - origin[1] - 1, origin[0], se_h - origin[0] - 1]\n if border_type == 'geodesic':\n border_value = -max_val\n border_type = 'constant'\n output: torch.Tensor = F.pad(tensor, pad_e, mode=border_type, value=border_value)\n\n # computation\n if structuring_element is None:\n neighborhood = torch.zeros_like(kernel)\n neighborhood[kernel == 0] = -max_val\n else:\n neighborhood = structuring_element.clone()\n neighborhood[kernel == 0] = -max_val\n\n if engine == 'unfold':\n output = output.unfold(2, se_h, 1).unfold(3, se_w, 1)\n output, _ = torch.max(output + neighborhood.flip((0, 1)), 4)\n output, _ = torch.max(output, 4)\n elif engine == 'convolution':\n B, C, H, W = tensor.size()\n h_pad, w_pad = output.shape[-2:]\n reshape_kernel = _neight2channels_like_kernel(kernel)\n output, _ = F.conv2d(\n output.view(B * C, 1, h_pad, w_pad), reshape_kernel, padding=0, bias=neighborhood.view(-1).flip(0)\n ).max(dim=1)\n output = output.view(B, C, H, W)\n else:\n raise NotImplementedError(f\"engine {engine} is unknown, use 'convolution' or 'unfold'\")\n return output.view_as(tensor)" }, { "identifier": "init_faceParsing_pretrained_model", "path": "swap_face_fine/face_parsing/face_parsing_demo.py", "snippet": "def init_faceParsing_pretrained_model(ckpt_path):\n parser = FaceParser(seg_ckpt=ckpt_path)\n\n print(\"Load faceParsing pre-traiend model success!\")\n\n return parser" }, { "identifier": "faceParsing_demo", "path": "swap_face_fine/face_parsing/face_parsing_demo.py", "snippet": "def faceParsing_demo(model, img: Image, convert_to_seg12=True):\n \"\"\"\n 提取 img 的face segmentation map\n \n args:\n model (Object): 加载好的预训练模型\n img (PIL.Image): [0, 255]范围的 PIL.Image 格式图片\n \"\"\"\n with torch.no_grad():\n seg = 
model(img).cpu().numpy().astype(np.uint8)\n \n if convert_to_seg12:\n seg = __ffhq_masks_to_faceParser_mask_detailed(seg)\n return seg" }, { "identifier": "vis_parsing_maps", "path": "swap_face_fine/face_parsing/face_parsing_demo.py", "snippet": "def vis_parsing_maps(image, parsing_anno, stride=1):\n \"\"\" 将原图 和 seg map 放到一起可视化\n \n args:\n img (PIL.Image): [0, 255]范围的 PIL.Image 格式图片\n parsing_anno (np.array): parsing之后的seg map, size为 [512, 512]\n return:\n vis_im (np.array): 可视化图片, 用cv2保存\n \"\"\"\n # Colors for all 20 parts\n part_colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0],\n [255, 0, 85], [255, 0, 170],\n [0, 255, 0], [85, 255, 0], [170, 255, 0],\n [0, 255, 85], [0, 255, 170],\n [0, 0, 255], [85, 0, 255], [170, 0, 255],\n [0, 85, 255], [0, 170, 255],\n [255, 255, 0], [255, 255, 85], [255, 255, 170],\n [255, 0, 255], [255, 85, 255], [255, 170, 255],\n [0, 255, 255], [85, 255, 255], [170, 255, 255]]\n\n im = image.resize((parsing_anno.shape[0], parsing_anno.shape[1]), Image.BILINEAR)\n im = np.array(im)\n vis_im = im.copy().astype(np.uint8)\n vis_parsing_anno = parsing_anno.copy().astype(np.uint8)\n vis_parsing_anno = cv2.resize(vis_parsing_anno, None, fx=stride, fy=stride, interpolation=cv2.INTER_NEAREST)\n vis_parsing_anno_color = np.zeros((vis_parsing_anno.shape[0], vis_parsing_anno.shape[1], 3)) + 255\n\n num_of_class = np.max(vis_parsing_anno)\n\n for pi in range(1, num_of_class + 1):\n index = np.where(vis_parsing_anno == pi)\n vis_parsing_anno_color[index[0], index[1], :] = part_colors[pi]\n\n vis_parsing_anno_color = vis_parsing_anno_color.astype(np.uint8)\n # print(vis_parsing_anno_color.shape, vis_im.shape)\n vis_im = cv2.addWeighted(cv2.cvtColor(vis_im, cv2.COLOR_RGB2BGR), 0.4, vis_parsing_anno_color, 0.6, 0)\n\n return vis_im" } ]
import numpy as np import torchvision.transforms as transforms import os import json import sys import pprint import torch import random import torch.nn.functional as F import torch.nn as nn import glob from torch.utils.data import DataLoader from datasets.dataset import CelebAHQDataset, get_transforms, TO_TENSOR, NORMALIZE, MASK_CONVERT_TF, MASK_CONVERT_TF_DETAILED, FFHQ_MASK_CONVERT_TF_DETAILED from models.networks import Net3 from options.optim_options import OptimOptions from criteria.id_loss import IDLoss from criteria.lpips.lpips import LPIPS from criteria.style_loss import StyleLoss from criteria.face_parsing.face_parsing_loss import FaceParsingLoss from functools import partial from utils import torch_utils from tqdm import tqdm from torchvision.utils import make_grid from PIL import Image from utils.alignment import crop_faces, calc_alignment_coefficients from utils.morphology import dilation from swap_face_fine.face_parsing.face_parsing_demo import init_faceParsing_pretrained_model, faceParsing_demo, vis_parsing_maps
13793
sys.path.append(".") sys.path.append("..") toPIL = transforms.ToPILImage() celelbAHQ_label_list = ['background','skin', 'nose', 'eye_g', 'l_eye', 'r_eye', 'l_brow', 'r_brow', 'l_ear', 'r_ear', 'mouth', 'u_lip', 'l_lip', 'hair', 'hat', 'ear_r', 'neck_l', 'neck', 'cloth'] # supported_swap_comps= ['background','skin', 'nose', 'eye', # 'brow','ear', 'mouth','hair', # 'hat','ear_r','neck', 'cloth'] # 9个属性 faceParser_label_list = ['background', 'mouth', 'eyebrows', 'eyes', 'hair', 'nose', 'skin', 'ears', 'belowface'] def paste_image(coeffs, img, orig_image): pasted_image = orig_image.copy().convert('RGBA') projected = img.convert('RGBA').transform(orig_image.size, Image.PERSPECTIVE, coeffs, Image.BILINEAR) pasted_image.paste(projected, (0, 0), mask=projected) return pasted_image def save_image(image, output_folder, image_name, image_index, ext='jpg'): if ext == 'jpeg' or ext == 'jpg': image = image.convert('RGB') folder = os.path.join(output_folder, image_name) os.makedirs(folder, exist_ok=True) image.save(os.path.join(folder, "%04d.%s"%(image_index,ext))) class Optimizer: def __init__(self, opts, net=None): self.opts = opts if net is None: """ self.test_ds = CelebAHQDataset(dataset_root=self.opts.dataset_root, mode="test", img_transform=transforms.Compose( [TO_TENSOR, NORMALIZE]), label_transform=transforms.Compose( [ MASK_CONVERT_TF_DETAILED,TO_TENSOR]), # MASK_CONVERT_TF, fraction=self.opts.ds_frac) print(f"Number of test samples: {len(self.test_ds)}") """ # self.test_dataloader = DataLoader(self.test_ds, batch_size=self.opts.test_batch_size, # shuffle=False, num_workers=int(self.opts.test_workers), drop_last=False) assert self.opts.checkpoint_path is not None, "please specify the pre-trained weights!" self.net = Net3(self.opts).eval().to(self.opts.device) ckpt_dict = torch.load(self.opts.checkpoint_path) self.net.latent_avg = ckpt_dict['latent_avg'].to(self.opts.device) if self.opts.start_from_latent_avg else None if self.opts.load_ema: self.net.load_state_dict(torch_utils.remove_module_prefix(ckpt_dict["state_dict_ema"],prefix="module.")) else: self.net.load_state_dict(torch_utils.remove_module_prefix(ckpt_dict["state_dict"],prefix="module.")) print("Load pre-trained weights.") # # 重新保存一下 # torch.save(ckpt_dict,"./ckpt.pth",_use_new_zipfile_serialization=False) else: self.net = net # loss 函数 self.mse_loss = nn.MSELoss().to(self.opts.device).eval() if self.opts.lpips_lambda > 0: self.lpips_loss = LPIPS(net_type='alex').to(self.opts.device).eval() if self.opts.id_lambda > 0: self.id_loss = IDLoss(self.opts).to(self.opts.device).eval() if self.opts.face_parsing_lambda > 0: self.face_parsing_loss = FaceParsingLoss(self.opts).to(self.opts.device).eval() self.img_transform = transforms.Compose([TO_TENSOR, NORMALIZE]) self.label_transform_wo_converter = transforms.Compose([TO_TENSOR])
sys.path.append(".") sys.path.append("..") toPIL = transforms.ToPILImage() celelbAHQ_label_list = ['background','skin', 'nose', 'eye_g', 'l_eye', 'r_eye', 'l_brow', 'r_brow', 'l_ear', 'r_ear', 'mouth', 'u_lip', 'l_lip', 'hair', 'hat', 'ear_r', 'neck_l', 'neck', 'cloth'] # supported_swap_comps= ['background','skin', 'nose', 'eye', # 'brow','ear', 'mouth','hair', # 'hat','ear_r','neck', 'cloth'] # 9个属性 faceParser_label_list = ['background', 'mouth', 'eyebrows', 'eyes', 'hair', 'nose', 'skin', 'ears', 'belowface'] def paste_image(coeffs, img, orig_image): pasted_image = orig_image.copy().convert('RGBA') projected = img.convert('RGBA').transform(orig_image.size, Image.PERSPECTIVE, coeffs, Image.BILINEAR) pasted_image.paste(projected, (0, 0), mask=projected) return pasted_image def save_image(image, output_folder, image_name, image_index, ext='jpg'): if ext == 'jpeg' or ext == 'jpg': image = image.convert('RGB') folder = os.path.join(output_folder, image_name) os.makedirs(folder, exist_ok=True) image.save(os.path.join(folder, "%04d.%s"%(image_index,ext))) class Optimizer: def __init__(self, opts, net=None): self.opts = opts if net is None: """ self.test_ds = CelebAHQDataset(dataset_root=self.opts.dataset_root, mode="test", img_transform=transforms.Compose( [TO_TENSOR, NORMALIZE]), label_transform=transforms.Compose( [ MASK_CONVERT_TF_DETAILED,TO_TENSOR]), # MASK_CONVERT_TF, fraction=self.opts.ds_frac) print(f"Number of test samples: {len(self.test_ds)}") """ # self.test_dataloader = DataLoader(self.test_ds, batch_size=self.opts.test_batch_size, # shuffle=False, num_workers=int(self.opts.test_workers), drop_last=False) assert self.opts.checkpoint_path is not None, "please specify the pre-trained weights!" self.net = Net3(self.opts).eval().to(self.opts.device) ckpt_dict = torch.load(self.opts.checkpoint_path) self.net.latent_avg = ckpt_dict['latent_avg'].to(self.opts.device) if self.opts.start_from_latent_avg else None if self.opts.load_ema: self.net.load_state_dict(torch_utils.remove_module_prefix(ckpt_dict["state_dict_ema"],prefix="module.")) else: self.net.load_state_dict(torch_utils.remove_module_prefix(ckpt_dict["state_dict"],prefix="module.")) print("Load pre-trained weights.") # # 重新保存一下 # torch.save(ckpt_dict,"./ckpt.pth",_use_new_zipfile_serialization=False) else: self.net = net # loss 函数 self.mse_loss = nn.MSELoss().to(self.opts.device).eval() if self.opts.lpips_lambda > 0: self.lpips_loss = LPIPS(net_type='alex').to(self.opts.device).eval() if self.opts.id_lambda > 0: self.id_loss = IDLoss(self.opts).to(self.opts.device).eval() if self.opts.face_parsing_lambda > 0: self.face_parsing_loss = FaceParsingLoss(self.opts).to(self.opts.device).eval() self.img_transform = transforms.Compose([TO_TENSOR, NORMALIZE]) self.label_transform_wo_converter = transforms.Compose([TO_TENSOR])
self.label_transform_w_converter = transforms.Compose([MASK_CONVERT_TF_DETAILED, TO_TENSOR])
5
2023-10-15 12:15:01+00:00
16k
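A note on reading the record above: the single code line shown after the long code blob is the immediate next statement of `Optimizer.__init__` (it follows `self.label_transform_wo_converter = ...`), and the numbers and timestamp after it are per-record metadata. The sketch below is purely illustrative and not part of the source; it assumes such crop-plus-continuation pairs are scored with a whitespace-insensitive exact match, and the helper name `exact_match` and the candidate string are my own assumptions.

# Illustrative sketch only: compares a predicted continuation against the gold
# next line from the record above, ignoring differences in internal whitespace.
def exact_match(prediction: str, reference: str) -> bool:
    return " ".join(prediction.split()) == " ".join(reference.split())

gold_next_line = (
    "self.label_transform_w_converter = "
    "transforms.Compose([MASK_CONVERT_TF_DETAILED, TO_TENSOR])"
)
candidate = "self.label_transform_w_converter = transforms.Compose([MASK_CONVERT_TF_DETAILED,  TO_TENSOR])"
print(exact_match(candidate, gold_next_line))  # True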
sotopia-lab/sotopia
examples/experiment_eval.py
[ { "identifier": "LLMAgent", "path": "sotopia/agents/llm_agent.py", "snippet": "class LLMAgent(BaseAgent[Observation, AgentAction]):\n def __init__(\n self,\n agent_name: str | None = None,\n uuid_str: str | None = None,\n agent_profile: AgentProfile | None = None,\n model_name: LLM_Name = \"gpt-3.5-turbo\",\n script_like: bool = False,\n ) -> None:\n super().__init__(\n agent_name=agent_name,\n uuid_str=uuid_str,\n agent_profile=agent_profile,\n )\n self.model_name = model_name\n self.script_like = script_like\n\n @property\n def goal(self) -> str:\n if self._goal is not None:\n return self._goal\n assert (\n len(self.inbox) > 0\n ), \"attribute goal has to be called after at least one step\"\n goal = generate_goal(\n self.model_name,\n background=self.inbox[0][\n 1\n ].to_natural_language(), # Only consider the first message for now\n )\n return goal\n\n @goal.setter\n def goal(self, goal: str) -> None:\n self._goal = goal\n\n def act(\n self,\n obs: Observation,\n gen_func: Callable[..., AgentAction] = generate_action,\n ) -> AgentAction:\n self.recv_message(\"Environment\", obs)\n\n if len(obs.available_actions) == 1 and \"none\" in obs.available_actions:\n return AgentAction(action_type=\"none\", argument=\"\")\n else:\n action = gen_func(\n self.model_name,\n history=\"\\n\".join(\n f\"{y.to_natural_language()}\" for x, y in self.inbox\n ),\n turn_number=obs.turn_number,\n action_types=obs.available_actions,\n agent=self.agent_name,\n goal=self.goal,\n )\n return action\n\n async def aact(self, obs: Observation) -> AgentAction:\n self.recv_message(\"Environment\", obs)\n\n if len(obs.available_actions) == 1 and \"none\" in obs.available_actions:\n return AgentAction(action_type=\"none\", argument=\"\")\n else:\n action, prompt = await agenerate_action(\n self.model_name,\n history=\"\\n\".join(\n f\"{y.to_natural_language()}\" for x, y in self.inbox\n ),\n turn_number=obs.turn_number,\n action_types=obs.available_actions,\n agent=self.agent_name,\n goal=self.goal,\n script_like=self.script_like,\n )\n return action" }, { "identifier": "EnvAgentComboStorage", "path": "sotopia/database/env_agent_combo_storage.py", "snippet": "class EnvAgentComboStorage(JsonModel):\n env_id: str = Field(default_factory=lambda: \"\", index=True)\n agent_ids: list[str] = Field(default_factory=lambda: [], index=True)" }, { "identifier": "EpisodeLog", "path": "sotopia/database/logs.py", "snippet": "class EpisodeLog(JsonModel):\n # Note that we did not validate the following constraints:\n # 1. The number of turns in messages and rewards should be the same or off by 1\n # 2. 
The agents in the messages are the same as the agetns\n\n environment: str = Field(index=True)\n agents: list[str] = Field(index=True)\n tag: str | None = Field(index=True)\n models: list[str] | None = Field(index=True)\n messages: list[list[tuple[str, str, str]]] # Messages arranged by turn\n reasoning: str\n rewards: list[\n tuple[float, dict[str, float]] | float\n ] # Rewards arranged by turn\n rewards_prompt: str\n\n @root_validator\n def agent_number_message_number_reward_number_turn_number_match(\n cls, values: Any\n ) -> Any:\n agents, _, reasoning, rewards = (\n values.get(\"agents\"),\n values.get(\"messages\"),\n values.get(\"reasoning\"),\n values.get(\"rewards\"),\n )\n agent_number = len(agents)\n\n assert (\n len(rewards) == agent_number\n ), f\"Number of agents in rewards {len(rewards)} and agents {agent_number} do not match\"\n return values\n\n def render_for_humans(self) -> tuple[list[AgentProfile], list[str]]:\n \"\"\"Generate a human readable version of the episode log.\n\n Returns:\n A tuple of (a list of agent_profiles, a list of str): The agent profiles, and the messages and rewards in each turn.\n \"\"\"\n\n agent_profiles = [\n AgentProfile.get(pk=uuid_str) for uuid_str in self.agents\n ]\n messages_and_rewards = []\n for idx, turn in enumerate(self.messages):\n messages_in_this_turn = []\n if idx == 0:\n assert (\n len(turn) >= 2\n ), \"The first turn should have at least environemnt messages\"\n messages_in_this_turn.append(turn[0][2])\n messages_in_this_turn.append(turn[1][2])\n for sender, receiver, message in turn:\n if receiver == \"Environment\":\n if sender != \"Environment\":\n if \"did nothing\" in message:\n continue\n else:\n if \"said:\" in message:\n messages_in_this_turn.append(\n f\"{sender} {message}\"\n )\n else:\n messages_in_this_turn.append(\n f\"{sender}: {message}\"\n )\n else:\n messages_in_this_turn.append(message)\n messages_and_rewards.append(\"\\n\".join(messages_in_this_turn))\n messages_and_rewards.append(f\"The reasoning is:\\n{self.reasoning}\")\n messages_and_rewards.append(\n f\"The rewards are:\\nAgent 1: {self.rewards[0]}\\nAgent 2: {self.rewards[1]}\"\n )\n return agent_profiles, messages_and_rewards" }, { "identifier": "AgentProfile", "path": "sotopia/database/persistent_profile.py", "snippet": "class AgentProfile(JsonModel):\n first_name: str = Field(index=True)\n last_name: str = Field(index=True)\n age: int = Field(index=True, default_factory=lambda: 0)\n occupation: str = Field(index=True, default_factory=lambda: \"\")\n gender: str = Field(index=True, default_factory=lambda: \"\")\n gender_pronoun: str = Field(index=True, default_factory=lambda: \"\")\n public_info: str = Field(index=True, default_factory=lambda: \"\")\n big_five: str = Field(index=True, default_factory=lambda: \"\")\n moral_values: list[str] = Field(index=False, default_factory=lambda: [])\n schwartz_personal_values: list[str] = Field(\n index=False, default_factory=lambda: []\n )\n personality_and_values: str = Field(index=True, default_factory=lambda: \"\")\n decision_making_style: str = Field(index=True, default_factory=lambda: \"\")\n secret: str = Field(default_factory=lambda: \"\")\n model_id: str = Field(default_factory=lambda: \"\")" }, { "identifier": "EnvironmentProfile", "path": "sotopia/database/persistent_profile.py", "snippet": "class EnvironmentProfile(JsonModel):\n codename: str = Field(\n index=True,\n default_factory=lambda: \"\",\n description=\"The codename of the environment\",\n )\n source: str = Field(\n index=True,\n 
default_factory=lambda: \"\",\n description=\"The source of the environment\",\n )\n scenario: str = Field(\n index=True,\n default_factory=lambda: \"\",\n description=\"A concrete scenario of where the social interaction takes place, the scenario should have two agents (agent1 and agent2), and you should illustrate the relationship between the two agents, and for what purpose agent1 is interacting with agent2. Please avoid mentioning specific names and occupations in the scenario and keep all the mentions gender-neutral. Also avoid generating scenarios that requires childrend (below 18) or elderly (above 70) to be involved.\",\n )\n agent_goals: list[str] = Field(\n default_factory=lambda: [],\n description=\"The social goals of each agent, which could include <extra_info>...</extra_info>, <clarification_hint>...</clarification_hint>, and <strategy_hint>...</strategy_hint> to help the agent achieve the goal. Avoid providing too specific strategy hint, try to be as abstract as possible. For example, use 'you can provide financial benefits to achieve your goal' instead of 'you can buy him a boba tea to achieve your goal.'\",\n )\n relationship: RelationshipType = Field(\n index=True,\n default_factory=lambda: RelationshipType.stranger,\n description=\"The relationship between the two agents, choose from: stranger, know_by_name, acquaintance, friend, romantic_relationship, family_member. Do not make up a relationship, but choose from the list, 0 means stranger, 1 means know_by_name, 2 means acquaintance, 3 means friend, 4 means romantic_relationship, 5 means family_member\",\n )\n age_constraint: str | None = Field(\n default_factory=lambda: None,\n description=\"The age constraint of the environment, a list of tuples, each tuple is a range of age, e.g., '[(18, 25), (30, 40)]' means the environment is only available to agent one between 18 and 25, and agent two between 30 and 40\",\n )\n occupation_constraint: str | None = Field(\n default_factory=lambda: None,\n description=\"The occupation constraint of the environment, a list of lists, each list is a list of occupations, e.g., '[['student', 'teacher'], ['doctor', 'nurse']]' means the environment is only available to agent one if agent one is a student or a teacher, and agent two is a doctor or a nurse\",\n )\n agent_constraint: list[list[str]] | None = Field(\n default_factory=lambda: None,\n )" }, { "identifier": "ReachGoalLLMEvaluator", "path": "sotopia/envs/evaluators.py", "snippet": "class ReachGoalLLMEvaluator(Evaluator):\n @beartype\n def __init__(\n self, model_name: LLM_Name, response_format: str = \"basic\"\n ) -> None:\n self.model_name = model_name\n self.prompt = \"\"\n self.response_format = response_format\n\n def __call__(\n self, turn_number: int, messages: list[tuple[str, Message]]\n ) -> list[tuple[str, tuple[tuple[str, int | float | bool], str]]]:\n raise NotImplementedError(\n \"ReachGoalLLMEvaluator is not implemented for synchronous evaluation\"\n )\n\n @gin.configurable\n @beartype\n async def __acall__(\n self,\n turn_number: int,\n messages: list[tuple[str, Message]] | None,\n history: str = \"\",\n temperature: float = 0.7,\n ) -> list[tuple[str, tuple[tuple[str, int | float | bool], str]]]:\n # filter did nothing\n if not history and messages:\n messages_filtered = [\n (x, y)\n for x, y in messages\n if \"did nothing\" not in y.to_natural_language()\n ]\n history = \"\\n\".join(\n [\n f\"{x} {y.to_natural_language()}\"\n if x != \"Environment\"\n else y.to_natural_language()\n for x, y in messages_filtered\n ]\n 
)\n response_format_class = (\n EnvResponsePlus if self.response_format == \"plus\" else EnvResponse\n )\n\n try:\n response: EnvResponsePlus | EnvResponse # fix type error from langchain 0.0.264. we don't need this line for langchain 0.0.263\n response, prompt = await agenerate(\n model_name=self.model_name,\n template=\"\"\"{history},\n Based on previous interactions, evaluate how well participants achieve their goals.\n Please following the format:\n {format_instructions}\n \"\"\",\n input_values=dict(history=history),\n output_parser=PydanticOutputParser[\n EnvResponsePlus | EnvResponse\n ](pydantic_object=response_format_class),\n temperature=temperature,\n )\n self.prompt = prompt\n response_list = []\n # TODO: multiple agents\n for dimension in response.agent_1_evaluation.dict().keys():\n response_list.append(\n (\n \"agent_1\",\n (\n (\n dimension,\n response.agent_1_evaluation.dict()[dimension][\n 1\n ],\n ),\n response.agent_1_evaluation.dict()[dimension][0],\n ),\n )\n )\n response_list.append(\n (\n \"agent_2\",\n (\n (\n dimension,\n response.agent_2_evaluation.dict()[dimension][\n 1\n ],\n ),\n response.agent_2_evaluation.dict()[dimension][0],\n ),\n )\n )\n return response_list\n except Exception as e:\n log.debug(f\"[red] Failed to generate environment response. {e}\")\n return []" }, { "identifier": "RuleBasedTerminatedEvaluator", "path": "sotopia/envs/evaluators.py", "snippet": "class RuleBasedTerminatedEvaluator(Evaluator):\n def __init__(\n self, max_turn_number: int = 20, max_stale_turn: int = 2\n ) -> None:\n self.max_turn_number = max_turn_number\n self.max_stale_turn = max_stale_turn\n\n def __call__(\n self, turn_number: int, messages: list[tuple[str, Message]]\n ) -> list[tuple[str, tuple[tuple[str, int | float | bool], str]]]:\n # Rule 1: If the conversation is too long, terminate the conversation\n conversation_too_long = turn_number > self.max_turn_number\n # Rule 2: If one of the players leaves, terminate the conversation\n p1_leaving = (\n len(messages) > 1\n and isinstance(messages[-2][1], AgentAction)\n and messages[-2][1].action_type == \"leave\"\n )\n p2_leaving = (\n bool(len(messages))\n and isinstance(messages[-1][1], AgentAction)\n and messages[-1][1].action_type == \"leave\"\n )\n # Rule 3: If the conversation is stale for too long, terminate the conversation\n stale_count = 0\n for message in messages[::-1]:\n if message[0] == \"Environment\":\n continue\n assert isinstance(message[1], AgentAction)\n if message[1].action_type == \"none\":\n stale_count += 1\n else:\n break\n if stale_count > self.max_stale_turn:\n break\n stale_too_long = stale_count > self.max_stale_turn\n terminated = (\n conversation_too_long or p1_leaving or p2_leaving or stale_too_long\n )\n reasons_for_termination = (\n f\"{'The conversation is too long; ' if conversation_too_long else ''}\"\n f\"{'Agent 1 is leaving; ' if p1_leaving else ''}\"\n f\"{'Agent 2 is leaving; ' if p2_leaving else ''}\"\n f\"{'The conversation stales for too long; ' if stale_too_long else ''}\"\n )\n return [\n (\n \"environment\",\n ((\"terminated\", terminated), reasons_for_termination),\n )\n ]\n\n async def __acall__(\n self, turn_number: int, messages: list[tuple[str, Message]]\n ) -> list[tuple[str, tuple[tuple[str, int | float | bool], str]]]:\n return self(turn_number, messages)" }, { "identifier": "ParallelSotopiaEnv", "path": "sotopia/envs/parallel.py", "snippet": "class ParallelSotopiaEnv(\n ParallelEnv[str, Observation, AgentAction], MessengerMixin\n):\n def __init__(\n self,\n 
available_action_types: set[ActionType] = set(\n [\"none\", \"speak\", \"non-verbal communication\", \"action\", \"leave\"]\n ),\n action_order: Literal[\n \"simutaneous\", \"round-robin\", \"random\"\n ] = \"simutaneous\",\n model_name: LLM_Name = \"gpt-3.5-turbo\",\n evaluators: list[Evaluator] = [],\n terminal_evaluators: list[Evaluator] = [],\n uuid_str: str | None = None,\n env_profile: EnvironmentProfile | None = None,\n ) -> None:\n \"\"\"A sotopia environment for parallel agents.\n\n Args:\n available_action_types (set[ActionType], optional): The action types that are available to the agents. Defaults to set([\"none\", \"speak\", \"non-verbal communication\", \"action\"]).\n action_order (Literal[\"simutaneous\", \"round-robin\", \"random\"], optional): The order in which the agents take actions. Defaults to \"simutaneous\".\n model_name (LLM_Name, optional): The name of the language model to use. Defaults to \"gpt-3.5-turbo\".\n \"\"\"\n super().__init__()\n self.model_name = model_name\n self.background = ScriptBackground(\n scenario=\"\",\n p1_background=\"\",\n p2_background=\"\",\n p1_goal=\"\",\n p2_goal=\"\",\n p1_name=\"\",\n p2_name=\"\",\n )\n\n self.agents = []\n self.action_spaces = {}\n self.available_action_types = list(available_action_types)\n self.action_order = action_order\n self.action_mask: list[bool] = []\n self.evaluators = evaluators\n self.terminal_evaluators = terminal_evaluators\n\n # if an environment profile is provided, use it\n assert (\n env_profile or uuid_str\n ), \"Either env_profile or uuid_str must be provided\"\n if env_profile is not None:\n self.profile = env_profile\n # if a uuid is provided, try to load the environment profile from the database\n elif uuid_str is not None:\n # try retrieving profile from database\n try:\n self.profile = EnvironmentProfile.get(pk=uuid_str)\n except NotFoundError:\n raise ValueError(\n f\"Agent with uuid {uuid_str} not found in database\"\n )\n\n @configurable\n def reset(\n self,\n seed: int | None = None,\n options: dict[str, str] | None = None,\n agents: Agents | None = None,\n omniscient: bool = False,\n lite: bool = False,\n ) -> dict[str, Observation]:\n \"\"\"Starting a new episode. Must be called before step().\n\n Args:\n seed (int, optional): Seed for the environment. Defaults to None. Not used right now.\n options (dict, optional): Options for the environment. Defaults to None.\n \"partial_background_file\" (str): Path to a json file which need to contain a ScriptBackground object. The backgound can be incompleted (\"unknown\" for missing parts), and the missing parts will be filled in by the environment.\n \"full_background_file\" (str): Path to a json file which need to contain a ScriptBackground object. The backgound must be completed (no \"unknown\" for missing parts).\n omniscient (bool, optional): Whether the agents know the other agent's goal. 
Defaults to False.\n \"\"\"\n super().__init__()\n MessengerMixin.reset_inbox(self)\n assert (\n not options\n or not (\"partial_background_file\" in options)\n and not (\"full_background_file\" in options)\n ), \"partial_background_file and full_background_file are not supported anymore\"\n if agents is not None:\n assert agents, \"agents must be provided\"\n assert len(agents) == 2, \"Only supporting two agents right now\"\n agent_names = list(agents.keys())\n agent_goals = self.profile.agent_goals\n assert (\n len(agent_goals) == 2\n ), \"Only supporting two agents right now\"\n\n raw_background = ScriptBackground(\n scenario=self.profile.scenario,\n p1_background=get_bio(\n self.profile.relationship,\n agents[agent_names[0]].profile,\n agent_id=0,\n ),\n p2_background=get_bio(\n self.profile.relationship,\n agents[agent_names[1]].profile,\n agent_id=1,\n ),\n p1_goal=f\"<root viewer='agent_0'>{agent_goals[0]}</root>\",\n p2_goal=f\"<root viewer='agent_1'>{agent_goals[1]}</root>\",\n p1_name=agent_names[0],\n p2_name=agent_names[1],\n )\n\n if lite:\n raw_background.p1_background = \"\"\n raw_background.p2_background = \"\"\n\n self.background = ScriptBackground(\n scenario=render_text_for_environment(raw_background.scenario),\n p1_background=render_text_for_environment(\n raw_background.p1_background\n ),\n p2_background=render_text_for_environment(\n raw_background.p2_background\n ),\n p1_goal=render_text_for_environment(raw_background.p1_goal),\n p2_goal=render_text_for_environment(raw_background.p2_goal),\n p1_name=raw_background.p1_name,\n p2_name=raw_background.p2_name,\n )\n else:\n raise ValueError(\"agents must be provided\")\n\n self.agents = [self.background.p1_name, self.background.p2_name]\n agent_backgrounds: list[ScriptBackground] = []\n if omniscient:\n for i in range(self.num_agents):\n agent_backgrounds.append(copy.deepcopy(self.background))\n else:\n for i in range(self.num_agents):\n agent_backgrounds.append(\n ScriptBackground(\n scenario=render_text_for_agent(\n raw_background.scenario, i\n ),\n p1_background=render_text_for_agent(\n raw_background.p1_background, i\n ),\n p2_background=render_text_for_agent(\n raw_background.p2_background, i\n ),\n p1_goal=render_text_for_agent(\n raw_background.p1_goal, i\n ),\n p2_goal=render_text_for_agent(\n raw_background.p2_goal, i\n ),\n p1_name=raw_background.p1_name,\n p2_name=raw_background.p2_name,\n )\n )\n background_for_a = agent_backgrounds[0]\n background_for_b = agent_backgrounds[1]\n\n print(\"Is the agent omniscient?\", omniscient)\n if not omniscient:\n background_for_a.p2_goal = \"Unknown\"\n background_for_b.p1_goal = \"Unknown\"\n\n self.action_spaces = {\n agent: Dict(\n dict(\n action_type=Discrete(len(self.available_action_types)),\n argument=Text(256),\n )\n )\n for agent in self.agents\n }\n self.turn_number = 0\n self.action_mask = [False for _ in self.agents]\n if self.action_order == \"round-robin\":\n self.action_mask[0] = True\n elif self.action_order == \"random\":\n self.action_mask[\n random.randint(0, len(self.action_mask) - 1)\n ] = True\n else:\n self.action_mask = [True for _ in self.agents]\n\n self.recv_message(\"Environment\", self.background)\n\n return {\n self.background.p1_name: Observation(\n last_turn=background_for_a.to_natural_language(),\n turn_number=0,\n available_actions=list(self.available_action_types)\n if self.action_mask[0]\n else [\"none\"],\n ),\n self.background.p2_name: Observation(\n last_turn=background_for_b.to_natural_language(),\n turn_number=0,\n 
available_actions=list(self.available_action_types)\n if self.action_mask[1]\n else [\"none\"],\n ),\n }\n\n @beartype\n def step(\n self, actions: dict[str, AgentAction] | dict[str, dict[str, int | str]]\n ) -> tuple[\n dict[str, Observation],\n dict[str, float],\n dict[str, bool],\n dict[str, bool],\n dict[str, dict[Any, Any]],\n ]:\n # Time step ++\n self.turn_number += 1\n\n # For action sampled from action space, it needs to be converted into AgentAction\n complied_actions: dict[str, AgentAction] = {}\n for key in actions.keys():\n action = actions[key]\n if isinstance(action, AgentAction):\n complied_actions[key] = action\n else:\n action[\"action_type\"] = self.available_action_types[\n int(action[\"action_type\"])\n ]\n complied_actions[key] = AgentAction.parse_obj(action)\n\n # Masking actions from agent that are in turn\n for idx, agent in enumerate(self.agents):\n if not self.action_mask[idx]:\n complied_actions[agent] = AgentAction(\n action_type=\"none\", argument=\"\"\n )\n\n self.recv_message(\n \"Environment\", SimpleMessage(message=f\"Turn #{self.turn_number}\")\n )\n for agent, action in complied_actions.items():\n self.recv_message(agent, action)\n\n response = unweighted_aggregate_evaluate(\n list(\n itertools.chain(\n *(\n evaluator(\n turn_number=self.turn_number, messages=self.inbox\n )\n for evaluator in self.evaluators\n )\n )\n )\n )\n\n self.action_mask = [False for _ in self.agents]\n if self.action_order == \"round-robin\":\n self.action_mask[self.turn_number % len(self.action_mask)] = True\n elif self.action_order == \"random\":\n self.action_mask[\n random.randint(0, len(self.action_mask) - 1)\n ] = True\n else:\n self.action_mask = [True for _ in self.agents]\n obs = _actions_to_natural_language(complied_actions)\n return (\n {\n self.background.p1_name: Observation(\n last_turn=render_text_for_agent(obs, agent_id=0),\n turn_number=self.turn_number,\n available_actions=list(self.available_action_types)\n if self.action_mask[0]\n else [\"none\"],\n ),\n self.background.p2_name: Observation(\n last_turn=render_text_for_agent(obs, agent_id=1),\n turn_number=self.turn_number,\n available_actions=list(self.available_action_types)\n if self.action_mask[1]\n else [\"none\"],\n ),\n },\n {\n self.background.p1_name: (\n response.p1_rate\n if isinstance(response.p1_rate, float)\n else response.p1_rate[0]\n )\n if response.p1_rate\n else 0,\n self.background.p2_name: (\n response.p2_rate\n if isinstance(response.p2_rate, float)\n else response.p2_rate[0]\n )\n if response.p2_rate\n else 0,\n },\n {\n self.background.p1_name: response.terminated,\n self.background.p2_name: response.terminated,\n },\n {\n self.background.p1_name: False,\n self.background.p2_name: False,\n },\n {\n self.background.p1_name: {\n \"comments\": response.comments or \"\",\n \"complete_rating\": response.p1_rate or 0,\n },\n self.background.p2_name: {\n \"comments\": response.comments or \"\",\n \"complete_rating\": response.p2_rate or 0,\n },\n },\n )\n\n @beartype\n async def astep(\n self, actions: dict[str, AgentAction] | dict[str, dict[str, int | str]]\n ) -> tuple[\n dict[str, Observation],\n dict[str, float],\n dict[str, bool],\n dict[str, bool],\n dict[str, dict[Any, Any]],\n ]:\n # Time step ++\n self.turn_number += 1\n\n # For action sampled from action space, it needs to be converted into AgentAction\n complied_actions: dict[str, AgentAction] = {}\n for key in actions.keys():\n action = actions[key]\n if isinstance(action, AgentAction):\n complied_actions[key] = action\n else:\n 
action[\"action_type\"] = self.available_action_types[\n int(action[\"action_type\"])\n ]\n complied_actions[key] = AgentAction.parse_obj(action)\n\n # Masking actions from agent that are in turn\n for idx, agent in enumerate(self.agents):\n if not self.action_mask[idx]:\n complied_actions[agent] = AgentAction(\n action_type=\"none\", argument=\"\"\n )\n\n self.recv_message(\n \"Environment\", SimpleMessage(message=f\"Turn #{self.turn_number}\")\n )\n for agent, action in complied_actions.items():\n self.recv_message(agent, action)\n\n response = unweighted_aggregate_evaluate(\n list(\n itertools.chain(\n *await asyncio.gather(\n *[\n evaluator.__acall__(\n turn_number=self.turn_number,\n messages=self.inbox,\n )\n for evaluator in self.evaluators\n ]\n )\n )\n )\n )\n\n if response.terminated:\n terminal_response = unweighted_aggregate_evaluate(\n list(\n itertools.chain(\n *await asyncio.gather(\n *[\n evaluator.__acall__(\n turn_number=self.turn_number,\n messages=self.inbox,\n )\n for evaluator in self.terminal_evaluators\n ]\n )\n )\n )\n )\n # incorporate terminal response into response\n response.p1_rate = response.p1_rate or terminal_response.p1_rate\n response.p2_rate = response.p2_rate or terminal_response.p2_rate\n if response.comments and terminal_response.comments:\n response.comments += terminal_response.comments\n elif terminal_response.comments:\n response.comments = terminal_response.comments\n\n self.action_mask = [False for _ in self.agents]\n if self.action_order == \"round-robin\":\n self.action_mask[self.turn_number % len(self.action_mask)] = True\n elif self.action_order == \"random\":\n self.action_mask[\n random.randint(0, len(self.action_mask) - 1)\n ] = True\n else:\n self.action_mask = [True for _ in self.agents]\n obs = _actions_to_natural_language(complied_actions)\n info = {\n self.background.p1_name: {\n \"comments\": response.comments or \"\",\n \"complete_rating\": response.p1_rate or 0,\n },\n self.background.p2_name: {\n \"comments\": response.comments or \"\",\n \"complete_rating\": response.p2_rate or 0,\n },\n }\n if response.terminated:\n info[\"rewards_prompt\"] = {\"overall_prompt\": self.terminal_evaluators[0].prompt} # type: ignore\n\n return (\n {\n self.background.p1_name: Observation(\n last_turn=render_text_for_agent(obs, agent_id=0),\n turn_number=self.turn_number,\n available_actions=list(self.available_action_types)\n if self.action_mask[0]\n else [\"none\"],\n ),\n self.background.p2_name: Observation(\n last_turn=render_text_for_agent(obs, agent_id=1),\n turn_number=self.turn_number,\n available_actions=list(self.available_action_types)\n if self.action_mask[1]\n else [\"none\"],\n ),\n },\n {\n self.background.p1_name: (\n response.p1_rate\n if isinstance(response.p1_rate, float)\n else response.p1_rate[0]\n )\n if response.p1_rate\n else 0,\n self.background.p2_name: (\n response.p2_rate\n if isinstance(response.p2_rate, float)\n else response.p2_rate[0]\n )\n if response.p2_rate\n else 0,\n },\n {\n self.background.p1_name: response.terminated,\n self.background.p2_name: response.terminated,\n },\n {\n self.background.p1_name: False,\n self.background.p2_name: False,\n },\n info,\n )\n\n def render(self, mode: str = \"human\") -> None:\n pass\n\n def close(self) -> None:\n pass" }, { "identifier": "LLM_Name", "path": "sotopia/generation_utils/generate.py", "snippet": "class EnvResponse(BaseModel):\nclass EnvResponsePydanticOutputParser(PydanticOutputParser[EnvResponse]):\nclass 
ListOfIntOutputParser(BaseOutputParser[list[int]]):\nclass ListOfStrOutputParser(BaseOutputParser[list[str]]):\nclass StrOutputParser(BaseOutputParser[str]):\nclass ScriptOutputParser(BaseOutputParser[ScriptInteractionReturnType]):\n def __init__(self, pydantic_object: Type[BaseModel] = EnvResponse) -> None:\n def parse(self, text: str) -> EnvResponse:\n def get_format_instructions(self) -> str:\n def __init__(\n self,\n number_of_int: int | None = None,\n range_of_int: tuple[int, int] | None = None,\n ):\n def _get_description_text(self) -> str:\n def get_format_instructions(self) -> str:\n def parse(self, output: str) -> list[int]:\n def _type(self) -> str:\n def __init__(\n self,\n number_of_str: int | None = None,\n ):\n def _get_description_text(self) -> str:\n def get_format_instructions(self) -> str:\n def parse(self, output: str) -> list[str]:\n def _type(self) -> str:\n def __init__(self) -> None:\n def get_format_instructions(self) -> str:\n def parse(self, output: str) -> str:\n def _type(self) -> str:\n def get_format_instructions(self) -> str:\n def parse(self, output: str) -> ScriptInteractionReturnType:\n def _type(self) -> str:\ndef _return_fixed_model_version(\n model_name: Literal[\"gpt-3.5-turbo\", \"gpt-4\", \"gpt-4-turbo\"]\n) -> str:\ndef obtain_chain(\n model_name: LLM_Name,\n template: str,\n input_variables: list[str],\n temperature: float = 0.7,\n max_retries: int = 6,\n) -> LLMChain:\ndef format_bad_output_for_script(\n ill_formed_output: str,\n format_instructions: str,\n agents: list[str],\n model_name: LLM_Name = \"gpt-3.5-turbo\",\n) -> str:\ndef format_bad_output(\n ill_formed_output: str,\n format_instructions: str,\n model_name: LLM_Name = \"gpt-3.5-turbo\",\n) -> str:\ndef generate(\n model_name: LLM_Name,\n template: str,\n input_values: dict[str, str],\n output_parser: BaseOutputParser[OutputType],\n temperature: float = 0.7,\n) -> OutputType:\nasync def agenerate(\n model_name: LLM_Name,\n template: str,\n input_values: dict[str, str],\n output_parser: BaseOutputParser[OutputType],\n temperature: float = 0.7,\n) -> tuple[OutputType, str]:\ndef generate_episode(\n model_name: LLM_Name,\n participants: str = \"Jack (a greedy person), Rose\",\n topic: str = \"lawsuit\",\n extra_info: str = \"\",\n) -> EnvResponse:\nasync def agenerate_env_profile(\n model_name: LLM_Name,\n inspiration_prompt: str = \"asking my boyfriend to stop being friends with his ex\",\n examples: str = \"\",\n temperature: float = 0.7,\n) -> tuple[EnvironmentProfile, str]:\nasync def agenerate_relationship_profile(\n model_name: LLM_Name,\n agents_profiles: list[str],\n) -> tuple[RelationshipProfile, str]:\nasync def agenerate_enviroment_profile(\n model_name: LLM_Name,\n inspiration_prompt: str = \"asking my boyfriend to stop being friends with his ex\",\n examples: str = \"\",\n) -> tuple[EnvironmentProfile, str]:\ndef fill_in_background(\n model_name: LLM_Name,\n partial_background: ScriptBackground,\n) -> ScriptBackground:\ndef generate_action(\n model_name: LLM_Name,\n history: str,\n turn_number: int,\n action_types: list[ActionType],\n agent: str,\n goal: str,\n) -> AgentAction:\ndef generate_action_speak(\n model_name: LLM_Name,\n history: str,\n turn_number: int,\n action_types: list[ActionType],\n agent: str,\n goal: str,\n) -> AgentAction:\nasync def agenerate_action(\n model_name: LLM_Name,\n history: str,\n turn_number: int,\n action_types: list[ActionType],\n agent: str,\n goal: str,\n temperature: float = 0.7,\n script_like: bool = False,\n) -> tuple[AgentAction, 
str]:\nasync def agenerate_script(\n model_name: LLM_Name,\n background: ScriptBackground,\n temperature: float = 0.7,\n agent_names: list[str] = [],\n agent_name: str = \"\",\n history: str = \"\",\n single_step: bool = False,\n) -> tuple[ScriptInteractionReturnType, str]:\ndef process_history(\n script: ScriptBackground | EnvResponse | dict[str, AgentAction]\n) -> str:\ndef generate_init_profile(\n model_name: LLM_Name, basic_info: dict[str, str]\n) -> str:\ndef convert_narratives(model_name: LLM_Name, narrative: str, text: str) -> str:\ndef generate_goal(model_name: LLM_Name, background: str) -> str:" }, { "identifier": "AgentAction", "path": "sotopia/messages/message_classes.py", "snippet": "class AgentAction(Message):\n action_type: ActionType = Field(\n description=\"whether to speak at this turn or choose to not do anything\"\n )\n argument: str = Field(\n description=\"the utterance if choose to speak, the expression or gesture if choose non-verbal communication, or the physical action if choose action\"\n )\n\n def to_natural_language(self) -> str:\n match self.action_type:\n case \"none\":\n return \"did nothing\"\n case \"speak\":\n return f'said: \"{self.argument}\"'\n case \"non-verbal communication\":\n return f\"[{self.action_type}] {self.argument}\"\n case \"action\":\n return f\"[{self.action_type}] {self.argument}\"\n case \"leave\":\n return \"left the conversation\"" }, { "identifier": "Message", "path": "sotopia/messages/message_classes.py", "snippet": "class Message(BaseModel):\n \"\"\"\n An interface for messages.\n There is only one required method: to_natural_language\n \"\"\"\n\n def to_natural_language(self) -> str:\n raise NotImplementedError" }, { "identifier": "Observation", "path": "sotopia/messages/message_classes.py", "snippet": "class Observation(Message):\n last_turn: str = Field(description=\"the last turn of the conversation\")\n turn_number: int = Field(description=\"the turn number of the conversation\")\n available_actions: list[ActionType] = Field(\n description=\"the available actions\"\n )\n\n def to_natural_language(self) -> str:\n if self.turn_number == 0:\n return f\"\\n{self.last_turn}\\nConversation Starts:\\n\"\n else:\n return f\"Turn #{self.turn_number-1}: {self.last_turn}\\n\"" }, { "identifier": "BaseSampler", "path": "sotopia/samplers/base_sampler.py", "snippet": "class BaseSampler(Generic[ObsType, ActType]):\n def __init__(\n self,\n env_candidates: Sequence[EnvironmentProfile | str] | None = None,\n agent_candidates: Sequence[AgentProfile | str] | None = None,\n ) -> None:\n def sample(\n self,\n agent_classes: Type[BaseAgent[ObsType, ActType]]\n | list[Type[BaseAgent[ObsType, ActType]]],\n n_agent: int = 2,\n replacement: bool = True,\n size: int = 1,\n env_params: dict[str, Any] = {},\n agents_params: list[dict[str, Any]] = [{}, {}],\n ) -> Generator[EnvAgentCombo[ObsType, ActType], None, None]:" }, { "identifier": "ConstraintBasedSampler", "path": "sotopia/samplers/constraint_based_sampler.py", "snippet": "class ConstraintBasedSampler(BaseSampler[ObsType, ActType]):\n def sample(\n self,\n agent_classes: Type[BaseAgent[ObsType, ActType]]\n | list[Type[BaseAgent[ObsType, ActType]]],\n n_agent: int = 2,\n replacement: bool = True,\n size: int = 10,\n env_params: dict[str, Any] = {},\n agents_params: list[dict[str, Any]] = [{}, {}],\n ) -> Generator[EnvAgentCombo[ObsType, ActType], None, None]:\n \"\"\"\n Sample an environment and a list of agents based on the constraints of the environment.\n\n Note: Sampling without replacement is 
only restricted to single env candidate.\n This is due to the fact that the number of possible combinations of env and agents is huge.\n Please sample for each env separately if you want to sample without replacement.\n \"\"\"\n assert (\n not isinstance(agent_classes, list)\n or len(agent_classes) == n_agent\n ), f\"agent_classes should be a list of length {n_agent} or a single agent class\"\n\n if not isinstance(agent_classes, list):\n agent_classes = [agent_classes] * n_agent\n assert (\n len(agents_params) == n_agent\n ), f\"agents_params should be a list of length {n_agent}\"\n\n env_profiles: list[EnvironmentProfile] = []\n agents_which_fit_scenario: list[list[str]] = []\n\n agent_candidate_ids: set[str] | None = None\n if self.agent_candidates:\n agent_candidate_ids = set(\n str(agent.pk) if not isinstance(agent, str) else agent\n for agent in self.agent_candidates\n )\n else:\n agent_candidate_ids = None\n\n if not replacement:\n assert self.env_candidates and len(self.env_candidates) == 1, (\n \"Sampling without replacement is only restricted to single env candidate (must be provided in the constructor). \"\n \"This is due to the fact that the number of possible combinations of env and agents is huge. \"\n \"Please sample for each env separately if you want to sample without replacement.\"\n )\n\n env_profile_id = (\n self.env_candidates[0].pk\n if not isinstance(self.env_candidates[0], str)\n else self.env_candidates[0]\n )\n\n assert env_profile_id, \"Env candidate must have an id\"\n\n agents_which_fit_scenario = _get_fit_agents_for_one_env(\n env_profile_id, agent_candidate_ids, size\n )\n env_profiles = (\n [EnvironmentProfile.get(env_profile_id)] * size\n if isinstance(self.env_candidates[0], str)\n else [self.env_candidates[0]] * size\n )\n else:\n for _ in range(size):\n if self.env_candidates:\n env_profile = random.choice(self.env_candidates)\n if isinstance(env_profile, str):\n env_profile = EnvironmentProfile.get(env_profile)\n else:\n env_profile_id = random.choice(\n list(EnvironmentProfile.all_pks())\n )\n env_profile = EnvironmentProfile.get(env_profile_id)\n env_profiles.append(env_profile)\n env_profile_id = env_profile.pk\n assert env_profile_id, \"Env candidate must have an id\"\n agents_which_fit_scenario.append(\n _get_fit_agents_for_one_env(\n env_profile_id, agent_candidate_ids, 1\n )[0]\n )\n\n assert (\n len(env_profiles) == size\n ), \"Number of env_profiles is not equal to size\"\n assert (\n len(agents_which_fit_scenario) == size\n ), \"Number of agents_which_fit_scenario is not equal to size\"\n\n for env_profile, agent_profile_id_list in zip(\n env_profiles, agents_which_fit_scenario\n ):\n env = ParallelSotopiaEnv(env_profile=env_profile, **env_params)\n agent_profiles = [\n AgentProfile.get(id) for id in agent_profile_id_list\n ]\n\n agents = [\n agent_class(agent_profile=agent_profile, **agent_params)\n for agent_class, agent_profile, agent_params in zip(\n agent_classes, agent_profiles, agents_params\n )\n ]\n # set goal for each agent\n for agent, goal in zip(agents, env.profile.agent_goals):\n agent.goal = goal\n\n yield env, agents" }, { "identifier": "run_async_server", "path": "sotopia/server.py", "snippet": "@gin.configurable\n@beartype\nasync def run_async_server(\n model_dict: dict[str, LLM_Name],\n sampler: BaseSampler[Observation, AgentAction] = BaseSampler(),\n action_order: Literal[\n \"simutaneous\", \"round-robin\", \"random\"\n ] = \"round-robin\",\n env_agent_combo_list: list[EnvAgentCombo[Observation, AgentAction]] = [],\n 
omniscient: bool = False,\n script_like: bool = False,\n json_in_script: bool = False,\n tag: str | None = None,\n push_to_db: bool = False,\n using_async: bool = True,\n) -> list[list[tuple[str, str, Message]]]:\n \"\"\"\n Doc incomplete\n\n Args:\n omniscient (bool): Whether the agent knows the goal of the other, default to False\n script_like (bool): Whether we generate the turn in script like manner, default to False\n json_in_script (bool): Whether we requires the script generator to return json (Only valid when script_like is True), default to False\n\n Note: env_agent_combo_list is optional. When it defaults to [], sampler is used\n else the sampler is not used. Please pass in BaseSampler or simply not specify it when using this option.\n \"\"\"\n\n assert not (\n push_to_db and tag is None\n ), \"please provide a tag when push to db\"\n\n # Create Environment and agents\n # This step will be moved to outside this function\n\n env_params = {\n \"model_name\": model_dict[\"env\"],\n \"action_order\": action_order,\n \"evaluators\": [\n RuleBasedTerminatedEvaluator(max_turn_number=20, max_stale_turn=2),\n ],\n \"terminal_evaluators\": [\n ReachGoalLLMEvaluator(model_dict[\"env\"]),\n ],\n }\n agents_model_dict = {\n \"agent1\": model_dict[\"agent1\"],\n \"agent2\": model_dict[\"agent2\"],\n }\n\n def get_agent_class(\n model_name: str,\n ) -> Type[BaseAgent[Observation, AgentAction]]:\n if model_name == \"human\":\n return HumanAgent\n elif script_like and not json_in_script:\n return ScriptWritingAgent\n else:\n return LLMAgent\n\n if env_agent_combo_list:\n assert (\n type(sampler) is BaseSampler\n ), \"No sampler should be used when `env_agent_combo_list` is empty\"\n env_agent_combo_iter = iter(env_agent_combo_list)\n else:\n env_agent_combo_iter = sampler.sample(\n agent_classes=[\n get_agent_class(model_name)\n for model_name in agents_model_dict.values()\n ],\n n_agent=len(agents_model_dict),\n env_params=env_params,\n agents_params=[\n {\"model_name\": model_name} if model_name != \"human\" else {}\n for model_name in agents_model_dict.values()\n ],\n )\n episode_futures = [\n arun_one_episode(\n env=env_agent_combo[0],\n agent_list=env_agent_combo[1],\n model_dict=model_dict,\n omniscient=omniscient,\n script_like=script_like,\n json_in_script=json_in_script,\n tag=tag,\n push_to_db=push_to_db,\n )\n for env_agent_combo in env_agent_combo_iter\n ]\n\n batch_results = (\n await tqdm_asyncio.gather(*episode_futures, desc=\"Running one batch\")\n if using_async\n else [await i for i in episode_futures]\n )\n\n return cast(list[list[tuple[str, str, Message]]], batch_results)" }, { "identifier": "parse_gin_flags", "path": "sotopia_conf/gin_utils.py", "snippet": "def parse_gin_flags(\n gin_search_paths: Sequence[str],\n gin_files: Sequence[str],\n gin_bindings: Sequence[str],\n skip_unknown: Union[bool, Sequence[str]] = False,\n finalize_config: bool = True,\n) -> None:\n \"\"\"Parses provided gin files override params.\n Args:\n gin_search_paths: paths that will be searched for gin files.\n gin_files: paths to gin config files to be parsed. Files will be parsed in\n order with conflicting settings being overriden by later files. Paths may\n be relative to paths in `gin_search_paths`.\n gin_bindings: individual gin bindings to be applied after the gin files are\n parsed. Will be applied in order with conflicting settings being overriden\n by later oens.\n skip_unknown: whether to ignore unknown bindings or raise an error (default\n behavior). 
Alternatively, a list of configurable names to skip if unknown.\n finalize_config: whether to finalize the config so that it cannot be\n modified (default behavior).\n \"\"\"\n # Register .gin file search paths with gin\n for gin_file_path in gin_search_paths:\n gin.add_config_file_search_path(gin_file_path)\n\n # Parse config files and bindings passed via flag.\n gin.parse_config_files_and_bindings(\n gin_files,\n gin_bindings,\n skip_unknown=skip_unknown,\n finalize_config=finalize_config,\n )\n logging.info(\"Gin Configuration:\")\n for line in gin.config_str().splitlines():\n logging.info(\"%s\", line)" }, { "identifier": "run", "path": "sotopia_conf/gin_utils.py", "snippet": "def run(main: Any) -> None:\n \"\"\"Wrapper for app.run that rewrites gin args before parsing.\"\"\"\n app.run(\n main,\n flags_parser=lambda args: app.parse_flags_with_usage(\n rewrite_gin_args(args)\n ),\n )" } ]
import asyncio
import logging
import os
import subprocess
import sys
import gin
from datetime import datetime
from logging import FileHandler
from typing import Any, Callable, Generator, Literal, Sequence, cast
from absl import app, flags
from rich import print
from rich.logging import RichHandler
from tqdm import tqdm
from sotopia.agents import LLMAgent
from sotopia.database import (
    AgentProfile,
    EnvAgentComboStorage,
    EnvironmentProfile,
    EpisodeLog,
)
from sotopia.envs.evaluators import (
    ReachGoalLLMEvaluator,
    RuleBasedTerminatedEvaluator,
)
from sotopia.envs.parallel import ParallelSotopiaEnv
from sotopia.generation_utils.generate import LLM_Name
from sotopia.messages import AgentAction, Message, Observation
from sotopia.samplers import (
    BaseSampler,
    ConstraintBasedSampler,
    EnvAgentCombo,
)
from sotopia.server import run_async_server
from sotopia_conf.gin_utils import parse_gin_flags, run
12,523
_DEFAULT_GIN_SEARCH_PATHS = [
    os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
]
FLAGS = flags.FLAGS

# date and message only
FORMAT = "%(asctime)s - %(levelname)s - %(name)s - %(message)s"

process = subprocess.Popen(
    ["git", "rev-parse", "HEAD"], shell=False, stdout=subprocess.PIPE
)
git_head_hash = process.communicate()[0].strip()

logging.basicConfig(
    level=15,
    format=FORMAT,
    datefmt="[%X]",
    handlers=[
        RichHandler(),
        FileHandler(
            datetime.now().strftime(
                f"./logs/%H_%M_%d_%m_%Y_{str(git_head_hash.decode('utf-8'))}.log"
            )
        ),
    ],
)

env_ids: list[str] = list(EnvironmentProfile.all_pks())
assert all(
    isinstance(env_id, str) for env_id in env_ids
), "env_ids should be a list of strings"


def check_existing_episodes(
    env_id: str,
    agent_ids: list[str],
    models: dict[str, LLM_Name],
    tag: str | None = None,
) -> bool:
    if tag:
        existing_episode = EpisodeLog.find(
            (EpisodeLog.environment == env_id) & (EpisodeLog.tag == tag)
        ).all()
    else:
        existing_episode = EpisodeLog.find(
            EpisodeLog.environment == env_id
        ).all()
    if existing_episode:
        for episode in existing_episode:
            assert isinstance(
                episode, EpisodeLog
            ), "episode should be an EpisodeLog"
            if episode.agents == agent_ids and episode.models == list(
                models.values()
            ):
                return True
        return False
    else:
        return False


def _sample_env_agent_combo_and_push_to_db(env_id: str) -> None:
sampler = ConstraintBasedSampler[Observation, AgentAction](
11
2023-10-23 19:47:26+00:00
16k
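As with the previous record, the short sketch below is purely illustrative and not stated anywhere in the source: it assumes the retrieved snippets, the import block, and the code crop of a record are stitched into a single prompt for next-line prediction. The helper name `build_prompt`, the ordering of the pieces, and the truncation length are all assumptions.

# Illustrative sketch only: stitch retrieved context snippets, the import block,
# and the code crop into one prompt whose tail is the completion point.
def build_prompt(context_snippets: list[str], import_block: str, code_crop: str,
                 max_chars: int = 12000) -> str:
    header = "\n\n".join(f"# Retrieved context\n{snippet}" for snippet in context_snippets)
    prompt = f"{header}\n\n{import_block}\n\n{code_crop}"
    return prompt[-max_chars:]  # keep the tail, where the next line is to be predicted

prompt = build_prompt(
    context_snippets=["class EnvAgentComboStorage(JsonModel): ..."],
    import_block="from sotopia.samplers import BaseSampler, ConstraintBasedSampler",
    code_crop="def _sample_env_agent_combo_and_push_to_db(env_id: str) -> None:",
)
print(prompt.endswith("-> None:"))  # True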
uukuguy/multi_loras
multi_loras/slora/router/manager.py
[ { "identifier": "SamplingParams", "path": "multi_loras/slora/sampling_params.py", "snippet": "class SamplingParams:\n\n def __init__(\n self,\n do_sample: bool = False,\n presence_penalty: float = 0.0,\n frequency_penalty: float = 0.0,\n temperature: float = 1.0,\n top_p: float = 1.0,\n top_k: int = -1, # -1 is for all \n ignore_eos: bool = False,\n max_new_tokens: int = 16,\n stop_sequences: Optional[Union[str, List[str]]] = None # 停止句子条件\n ) -> None:\n self.do_sample = do_sample\n self.presence_penalty = presence_penalty\n self.frequency_penalty = frequency_penalty\n self.temperature = temperature\n self.top_p = top_p\n self.top_k = top_k\n self.ignore_eos = ignore_eos\n self.max_new_tokens = max_new_tokens\n self.stop_sequences = stop_sequences\n if self.do_sample == False:\n self.temperature = 1.0\n self.top_p = 1.0\n self.top_k = 1\n if self.temperature >= 0.0 and self.temperature < _SAMPLING_EPS: # temperature is too slow, change to greedy search\n self.temperature = 1.0\n self.top_k = 1\n return\n \n def verify(self):\n if self.presence_penalty < 0.0:\n raise ValueError(f\"presence_penalty must >= 0.0, got {self.presence_penalty}\")\n if self.frequency_penalty < 0.0:\n raise ValueError(f\"frequency_penalty must >= 0.0, got {self.frequency_penalty}\")\n if self.temperature <= 0.0:\n raise ValueError(f\"temperature must > 0.0, got {self.temperature}\")\n if self.top_p <= 0.0 or self.top_p > 1.0:\n raise ValueError(f\"top_p must in (0.0, 1.0], got {self.top_p}\")\n if self.top_k < -1 or self.top_k == 0:\n raise ValueError(f\"top_k must be -1 (disable), or at least 1, got {self.top_k}.\")\n if self.max_new_tokens < 1:\n raise ValueError(f\"max_new_tokens must be at least 1 , got {self.max_new_tokens}.\")\n return\n\n def stop_sentences_to_token_ids(self, tokenizer):\n if self.stop_sequences is None:\n self.stop_sequences = []\n else:\n if isinstance(self.stop_sequences, str):\n self.stop_sequences = [self.stop_sequences]\n new_stop_sequences = []\n for stop_str in self.stop_sequences:\n stop_str_ids = tokenizer.encode(stop_str)\n if stop_str_ids is not None and len(stop_str_ids) >= 1: # remove bos_token_id\n stop_str_ids = stop_str_ids[1:]\n if len(stop_str_ids) > 0:\n new_stop_sequences.append(stop_str_ids)\n self.stop_sequences = new_stop_sequences\n return\n \n def to_dict(self):\n ret = {}\n ret[\"do_sample\"] = self.do_sample\n ret[\"presence_penalty\"] = self.presence_penalty\n ret[\"frequency_penalty\"] = self.frequency_penalty\n ret[\"temperature\"] = self.temperature\n ret[\"top_p\"] = self.top_p\n ret[\"top_k\"] = self.top_k\n # if self.ignore_eos is not None:\n # ret[\"ignore_eos\"] = self.ignore_eos\n # if self.max_tokens is not None:\n # ret[\"max_tokens\"] = self.max_tokens\n return ret" }, { "identifier": "Req", "path": "multi_loras/slora/io_struct.py", "snippet": "class Req:\n def __init__(self, adapter_dir, request_id, prompt_ids, sample_params: SamplingParams):\n self.adapter_dir = adapter_dir\n self.request_id = request_id\n self.prompt_ids = prompt_ids\n self.input_len = len(prompt_ids)\n self.max_output_len = sample_params.max_new_tokens\n self.sample_params = sample_params\n self.output_ids = []\n self.output_metadata_list = []\n self.has_generate_finished = False\n self.aborted = False\n\n def to_rpc_obj(self):\n return {\"adapter_dir\": self.adapter_dir,\n \"request_id\": self.request_id,\n \"input_id\": self.prompt_ids,\n \"output_len\": self.max_output_len,\n \"sampling_param\": self.sample_params.to_dict() }\n\n def to_req_detokenization_state(self):\n out = 
ReqDetokenizationState(self.request_id, self.prompt_ids, self.max_output_len, self.sample_params.ignore_eos)\n if self.output_metadata_list:\n out.gen_metadata.update(self.output_metadata_list[-1])\n return out\n \n def stop_sequences_matched(self):\n for stop_token_ids in self.sample_params.stop_sequences:\n stop_len = len(stop_token_ids)\n if stop_len > 0:\n if len(self.output_ids) >= stop_len:\n if all(self.output_ids[-(stop_len - i)] == stop_token_ids[i] for i in range(stop_len)):\n return True\n return False\n\n def __repr__(self):\n return (f\"request_id(n={self.request_id}, \"\n f\"adapter_dir={self.adapter_dir}, \"\n f\"prompt_ids={self.prompt_ids}, \")" }, { "identifier": "Batch", "path": "multi_loras/slora/io_struct.py", "snippet": "class Batch:\n def __init__(self, batch_id, reqs: List[Req]):\n self.batch_id = batch_id\n self.reqs = reqs\n self.id_to_reqs = {req.request_id: req for req in reqs}\n\n self.adapter_dirs = set()\n for req in reqs:\n self.adapter_dirs.add(req.adapter_dir)\n\n def input_tokens(self):\n batch_input_tokens = 0\n for req in self.reqs:\n batch_input_tokens += req.input_len\n return batch_input_tokens\n\n def calcu_max_tokens(self):\n tokens = 0\n for req in self.reqs:\n tokens += req.input_len + req.max_output_len\n return tokens\n \n def calcu_used_tokens(self):\n tokens = 0\n for req in self.reqs:\n tokens += req.input_len + len(req.output_ids)\n return tokens\n\n def mark_finished_req(self, eos_id):\n has_new_finish = False\n for req in self.reqs:\n if req.stop_sequences_matched():\n req.has_generate_finished = True\n has_new_finish = True\n if req.output_ids[-1] == eos_id and req.sample_params.ignore_eos == False:\n req.has_generate_finished = True\n has_new_finish = True\n if len(req.output_ids) >= req.max_output_len or req.aborted:\n req.has_generate_finished = True\n has_new_finish = True\n return has_new_finish\n\n def filter_finished(self):\n unfinished_req = []\n for req in self.reqs:\n if not req.has_generate_finished:\n unfinished_req.append(req)\n self.reqs = unfinished_req\n self.id_to_reqs = {req.request_id: req for req in self.reqs}\n\n self.adapter_dirs = set()\n for req in self.reqs:\n self.adapter_dirs.add(req.adapter_dir)\n\n def is_clear(self):\n return len(self.reqs) == 0\n\n def merge(self, mini_batch):\n for _req in mini_batch.reqs:\n self.reqs.append(_req)\n self.adapter_dirs.add(_req.adapter_dir)\n self.id_to_reqs = {req.request_id: req for req in self.reqs}\n return\n\n def __repr__(self):\n return (f\"batch_id={self.batch_id}, \"\n # f\"reqs={self.reqs}, \"\n f\"req_ids={self.id_to_reqs.keys()}\")" }, { "identifier": "BatchAbortReq", "path": "multi_loras/slora/io_struct.py", "snippet": "class BatchAbortReq:\n def __init__(self, req_ids):\n self.reqs: List[str] = req_ids" }, { "identifier": "BatchTokenIdOut", "path": "multi_loras/slora/io_struct.py", "snippet": "class BatchTokenIdOut:\n def __init__(self):\n self.reqs_infs: List[Tuple[str, int, Dict, bool, bool]] = [] # [req_id, new_token_id, gen_metadata, finished_state, abort_state]" }, { "identifier": "AbortReq", "path": "multi_loras/slora/io_struct.py", "snippet": "class AbortReq:\n def __init__(self, req_id):\n self.req_id = req_id" }, { "identifier": "InputParams", "path": "multi_loras/slora/router/input_params.py", "snippet": "class InputParams:\n\n def __init__(\n self,\n max_req_total_len,\n # kv cache manager parameters\n max_total_token_num,\n pool_size_lora,\n batch_max_tokens,\n running_max_req_size,\n # mem_ratio,\n # adapter_ratio,\n # heuristic\n swap,\n 
prefetch,\n prefetch_size,\n scheduler,\n profile,\n batch_num_adapters,\n enable_abort,\n # kernel,\n # # debug\n dummy,\n no_lora_compute,\n no_lora_swap,\n # no_lora_copy,\n no_kernel,\n no_mem_pool,\n bmm,\n ) -> None:\n self.max_req_total_len = max_req_total_len\n self.max_total_token_num = max_total_token_num\n self.pool_size_lora = pool_size_lora\n self.batch_max_tokens = batch_max_tokens\n self.running_max_req_size = running_max_req_size\n # self.mem_ratio = mem_ratio\n # self.adapter_ratio = adapter_ratio\n\n self.swap = swap\n self.prefetch = prefetch\n self.prefetch_size = prefetch_size\n self.scheduler = scheduler\n self.profile = profile\n self.batch_num_adapters = batch_num_adapters\n self.enable_abort = enable_abort\n # self.kernel = kernel\n\n self.dummy = dummy\n self.no_lora_compute = no_lora_compute\n self.no_lora_swap = no_lora_swap\n # self.no_lora_copy = no_lora_copy\n self.no_kernel = no_kernel\n self.no_mem_pool = no_mem_pool\n self.bmm = bmm\n return" }, { "identifier": "start_model_process", "path": "multi_loras/slora/router/model_infer/model_rpc.py", "snippet": "async def start_model_process(port, world_size):\n # 单卡时不使用 rpc\n if world_size == 1:\n return ModelRpcClient(ModelRpcServer(), world_size)\n \n import multiprocessing\n proc = multiprocessing.Process(target=_init_env, args=(port,))\n proc.start()\n await asyncio.sleep(2)\n repeat_count = 0\n while repeat_count < 20:\n try:\n con = rpyc.connect(\"localhost\", port, config={\"allow_pickle\": True})\n break\n except BaseException:\n await asyncio.sleep(1)\n repeat_count += 1\n if repeat_count == 20:\n raise Exception(\"init rpc env error!\")\n\n assert proc.is_alive()\n return ModelRpcClient(con.root, world_size, rpc_server_process=proc)" }, { "identifier": "ModelRpcClient", "path": "multi_loras/slora/router/model_infer/model_rpc.py", "snippet": "class ModelRpcClient:\n def __init__(self, model_rpc, world_size, rpc_server_process=None):\n self.model: ModelRpcServer = model_rpc\n self.world_size = world_size\n self.rpc_server_process = rpc_server_process\n self.use_rpc = self.world_size != 1\n if self.use_rpc:\n def async_wrap(f):\n f = rpyc.async_(f)\n async def _func(*args, **kwargs):\n ans = f(*args, **kwargs)\n await asyncio.to_thread(ans.wait)\n # raise if exception\n return ans.value\n return _func\n self._init_model = async_wrap(self.model.init_model)\n self._load_adapters = rpyc.async_(self.model.load_adapters)\n self._offload_adapters = rpyc.async_(self.model.offload_adapters)\n self._unmerge_adapter = rpyc.async_(self.model.unmerge_adapter)\n self._merge_adapter = rpyc.async_(self.model.merge_adapter)\n self._add_batch = async_wrap(self.model.add_batch)\n self._prefill_batch = async_wrap(self.model.prefill_batch)\n self._decode_batch = async_wrap(self.model.decode_batch)\n self._filter_batch = async_wrap(self.model.filter_batch)\n self._merge_batch = async_wrap(self.model.merge_batch)\n self._remove_batch = async_wrap(self.model.remove_batch)\n self._profile_prefill = async_wrap(self.model.profile_prefill)\n else:\n self._init_model = self.model.exposed_init_model\n self._load_adapters = self.model.exposed_load_adapters\n self._offload_adapters = self.model.exposed_offload_adapters\n self._merge_adapter = self.model.exposed_merge_adapter\n self._unmerge_adapter = self.model.exposed_unmerge_adapter\n self._add_batch = self.model.exposed_add_batch\n self._prefill_batch = self.model.exposed_prefill_batch\n self._decode_batch = self.model.exposed_decode_batch\n self._filter_batch = 
self.model.exposed_filter_batch\n self._merge_batch = self.model.exposed_merge_batch\n self._remove_batch = self.model.exposed_remove_batch\n self._profile_prefill = self.model.exposed_profile_prefill\n return\n\n async def init_model(self, rank_id, world_size, weight_dir, adapter_dirs,\n max_total_token_num, load_way, mode, input_params,\n\t\t\t prefetch_stream):\n ans : rpyc.AsyncResult = self._init_model(rank_id, world_size, weight_dir, adapter_dirs,\n max_total_token_num, load_way, mode, input_params,\n\t\t\t\t\t\t prefetch_stream)\n if self.use_rpc:\n await ans\n return\n else:\n return\n\n\n async def load_adapters(self, reqs, prefetch=False):\n self._load_adapters(reqs, prefetch=prefetch)\n\n\n async def offload_adapters(self, reserved_reqs=None, prefetch=False):\n self._offload_adapters(reserved_reqs, prefetch=prefetch)\n \n async def unmerge_adapter(self):\n self._unmerge_adapter()\n \n async def merge_adapter(self):\n self._merge_adapter()\n\n\n async def init_batch(self, batch_id, reqs):\n ans = self._add_batch(batch_id, reqs, \"fp16\")\n if self.use_rpc:\n await ans\n return\n else:\n return\n\n async def prefill_batch(self, batch_id):\n ans = self._prefill_batch(batch_id)\n if self.use_rpc:\n return await ans\n else:\n return ans\n\n async def decode_batch(self, batch_id):\n ans = self._decode_batch(batch_id)\n if self.use_rpc:\n return await ans\n else:\n return ans\n\n async def filter_batch(self, batch_id, req_id_list):\n ans = self._filter_batch(batch_id, req_id_list)\n if self.use_rpc:\n await ans\n return\n else:\n return \n\n async def merge_batch(self, batch_id1, batch_id2):\n ans = self._merge_batch(batch_id1, batch_id2)\n if self.use_rpc:\n await ans\n return\n else:\n return\n\n async def remove_batch(self, batch_id):\n ans = self._remove_batch(batch_id)\n if self.use_rpc:\n await ans\n return\n else:\n return\n \n async def profile_prefill(self):\n ans = self._profile_prefill()\n if self.use_rpc:\n return await ans\n else:\n return ans" }, { "identifier": "ReqQueue", "path": "multi_loras/slora/router/req_queue.py", "snippet": "class ReqQueue:\n\n def __init__(self, max_total_tokens, batch_max_tokens, running_max_req_size) -> None:\n self.max_total_tokens = max_total_tokens\n assert batch_max_tokens is not None\n self.batch_max_tokens = batch_max_tokens\n self.running_max_req_size = running_max_req_size\n self.waiting_req_list: List[Req] = []\n \n def append(self, req):\n self.waiting_req_list.append(req)\n return\n \n def _init_cache_list(self, current_batch:Batch, lora_ranks):\n if current_batch is not None:\n self.cache_len_list = []\n self.adapters = set()\n self.adapter_size = 0\n for req in current_batch.reqs:\n self.cache_len_list.append((req.input_len + len(req.output_ids),\n req.max_output_len - len(req.output_ids) - 1))\n if req.adapter_dir not in self.adapters:\n self.adapter_size += lora_ranks[req.adapter_dir] * 4\n self.adapters.add(req.adapter_dir)\n else:\n self.cache_len_list = []\n self.adapters = set()\n self.adapter_size = 0\n \n # @calculate_time(show=True, min_cost_ms=0.1)\n def _can_add_new_req(self, req, lora_ranks):\n self.cache_len_list.append((req.input_len + 1, req.max_output_len - 1)) # hard to analysis\n self.cache_len_list.sort(key=lambda x: -x[1])\n if req.adapter_dir not in self.adapters:\n self.adapter_size += lora_ranks[req.adapter_dir] * 4\n self.adapters.add(req.adapter_dir)\n \n left_out_len_array = np.array([e[1] for e in self.cache_len_list])\n # assert left_out_len_array.min() >= 0\n has_run_len_array = np.array([e[0] for e in 
self.cache_len_list])\n cum_run_len_array = np.cumsum(has_run_len_array)\n size_array = np.arange(1, len(self.cache_len_list) + 1, 1)\n \n need_max_token_num = (left_out_len_array * size_array + cum_run_len_array).max()\n if (need_max_token_num < self.max_total_tokens - self.adapter_size and\n len(self.cache_len_list) <= self.running_max_req_size):\n return True\n else:\n return False\n\n def generate_new_batch(self, current_batch:Batch, lora_ranks: dict[str, int]):\n if current_batch is not None and len(current_batch.reqs) >= self.running_max_req_size:\n return None\n \n self._init_cache_list(current_batch, lora_ranks)\n can_run_list = []\n new_batch_total_tokens = 0\n aborted_count = 0\n for req in self.waiting_req_list:\n if req.aborted:\n aborted_count += 1\n continue\n if (self._can_add_new_req(req, lora_ranks) and\n new_batch_total_tokens + req.input_len <= self.batch_max_tokens):\n can_run_list.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n\n if len(can_run_list) != 0:\n new_batch = Batch(uuid.uuid4().hex, can_run_list)\n self.waiting_req_list = self.waiting_req_list[len(can_run_list) + aborted_count:]\n return new_batch\n else:\n return None\n\n\n def next_batch(self):\n next_batch = []\n new_batch_total_tokens = 0\n for req in self.waiting_req_list:\n if req.aborted:\n continue\n if new_batch_total_tokens + req.input_len <= self.batch_max_tokens:\n next_batch.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n if len(next_batch) > 0:\n next_batch = Batch(uuid.uuid4().hex, next_batch)\n return next_batch\n else:\n return None" }, { "identifier": "Stats", "path": "multi_loras/slora/router/stats.py", "snippet": "class Stats:\n\n def __init__(self, log_status, log_stats_interval) -> None:\n self.log_stats = log_status\n self.log_stats_interval = log_stats_interval\n self.last_log_time = time.time()\n self.all_tokens = 0\n self.output_tokens = 0\n self.prompt_tokens = 0\n return\n \n def count_prompt_tokens(self, run_batch):\n if self.log_stats:\n tokens = run_batch.input_tokens()\n self.prompt_tokens += tokens\n self.all_tokens += tokens\n return\n \n def count_output_tokens(self, run_batch):\n if self.log_stats:\n tokens = len(run_batch.reqs)\n self.output_tokens += tokens\n self.all_tokens += tokens\n return\n\n def print_stats(self):\n if not self.log_stats:\n return\n\n now = time.time()\n if now - self.last_log_time > self.log_stats_interval:\n print(f\"Avg tokens(prompt+generate) throughput: {self.all_tokens/(now-self.last_log_time):8.3f} tokens/s\\n\"\n f\"Avg prompt tokens throughput: {self.prompt_tokens/(now-self.last_log_time):8.3f} tokens/s\\n\"\n f\"Avg generate tokens throughput: {self.output_tokens/(now-self.last_log_time):8.3f} tokens/s\")\n self.all_tokens = 0\n self.output_tokens = 0\n self.prompt_tokens = 0\n self.last_log_time = now\n return" }, { "identifier": "AlphaModel", "path": "multi_loras/slora/router/profiler.py", "snippet": "class AlphaModel:\n def __init__(self, profiling_results) -> None:\n self.base_prefill = profiling_results[0]\n print(self.base_prefill)\n \n # load from .pkl file\n @classmethod\n def from_file(cls, file_path):\n with open(file_path, \"rb\") as f:\n results = pickle.load(f)\n return cls(results)\n\n def get_latency(self, batch_size, seq_len):\n seq_len = math.ceil(seq_len / 32) * 32\n assert seq_len <= 1024\n if batch_size == 0: return 0\n # assert batch_size in self.base_prefill\n if batch_size in self.base_prefill:\n return self.base_prefill[batch_size][seq_len]\n elif batch_size == 1 and 
2 in self.base_prefill:\n return self.base_prefill[2][seq_len]\n elif batch_size % 2 != 0 and batch_size - 1 in self.base_prefill and batch_size + 1 in self.base_prefill:\n return (self.base_prefill[batch_size - 1][seq_len] + self.base_prefill[batch_size + 1][seq_len]) / 2\n else:\n return np.Inf" }, { "identifier": "BetaModel", "path": "multi_loras/slora/router/profiler.py", "snippet": "class BetaModel:\n def __init__(self, profiling_results) -> None:\n self.base_prefill = profiling_results[0]\n self.adapter_prefill = profiling_results[1]\n print(self.adapter_prefill)\n \n # load from .pkl file\n @classmethod\n def from_file(cls, file_path):\n with open(file_path, \"rb\") as f:\n results = pickle.load(f)\n return cls(results)\n \n def get_latency(self, rank_size, batch_size, seq_len):\n if rank_size == 0: return 0\n seq_len = math.ceil(seq_len / 32) * 32\n assert seq_len <= 1024\n if batch_size == 0: return 0\n # assert batch_size in self.base_prefill\n if batch_size in self.base_prefill:\n return self.adapter_prefill[rank_size][batch_size][seq_len] - self.base_prefill[batch_size][seq_len]\n elif batch_size == 1 and 2 in self.base_prefill:\n return self.adapter_prefill[rank_size][2][seq_len] - self.base_prefill[2][seq_len]\n elif batch_size % 2 != 0 and batch_size - 1 in self.base_prefill and batch_size + 1 in self.base_prefill:\n a = self.adapter_prefill[rank_size][batch_size - 1][seq_len] - self.base_prefill[batch_size - 1][seq_len]\n b = self.adapter_prefill[rank_size][batch_size + 1][seq_len] - self.base_prefill[batch_size + 1][seq_len]\n return (a + b) / 2\n else:\n return np.Inf" }, { "identifier": "PETSReqQueue", "path": "multi_loras/slora/router/pets_req_queue.py", "snippet": "class PETSReqQueue(ReqQueue):\n\n def __init__(self, max_total_tokens, batch_max_tokens, running_max_req_size) -> None:\n super().__init__(max_total_tokens, batch_max_tokens, running_max_req_size)\n self.alpha = None\n self.beta = None # will be set automatically in the profiling function in router.manager\n \n \n def append(self, req):\n self.waiting_req_list.append(req)\n return\n \n def _init_cache_list(self, current_batch:Batch, lora_ranks):\n if current_batch is not None:\n self.cache_len_list = []\n self.adapters = set()\n self.adapter_size = 0\n for req in current_batch.reqs:\n self.cache_len_list.append((req.input_len + len(req.output_ids),\n req.max_output_len - len(req.output_ids) - 1))\n if req.adapter_dir not in self.adapters:\n self.adapter_size += lora_ranks[req.adapter_dir] * 4\n self.adapters.add(req.adapter_dir)\n else:\n self.cache_len_list = []\n self.adapters = set()\n self.adapter_size = 0\n \n def intra_task_batching(self, lora_ranks):\n ## Preprocessing: gather the queries with the same adapter.\n clustered_queries_by_adapter = {}\n for query in self.waiting_req_list:\n adapter_dir = query.adapter_dir\n if adapter_dir in clustered_queries_by_adapter:\n clustered_queries_by_adapter[adapter_dir].append(query)\n else:\n clustered_queries_by_adapter[adapter_dir] = [query]\n\n ## DP\n mini_batches = []\n for adapter_dir, queries in clustered_queries_by_adapter.items():\n state_1st_stage = []\n split_idx_list = []\n\n ### Sort queries according to the sequence length in ascending order.\n queries = sorted(queries, key=lambda x: x.input_len)\n queries.insert(0, None) # Sentinel.\n\n ### Initialize.\n state_1st_stage.append(0)\n split_idx_list.append(0)\n for j in range(1, len(queries)):\n min_cost = np.Inf # INF\n split_idx = 0\n for k in range(1, j+1):\n lora_rank = lora_ranks[adapter_dir]\n 
tmp = state_1st_stage[k-1] + self.beta.get_latency(lora_rank, j-k+1, queries[j].input_len)\n if tmp < min_cost:\n min_cost = tmp\n split_idx = k-1\n split_idx_list.append(split_idx)\n state_1st_stage.append(min_cost)\n \n ### Split queries into mini-batches according to split_idx_list.\n \n end_idx = len(queries) - 1\n\n while(end_idx > 0):\n start_idx = split_idx_list[end_idx] + 1\n mini_batch = []\n max_len = queries[end_idx].input_len\n for j in range(start_idx, end_idx + 1):\n mini_batch.append(queries[j]) \n mini_batches.append((mini_batch, max_len))\n end_idx = split_idx_list[end_idx] \n \n return mini_batches\n \n # Inter-task batching.\n def inter_task_batching(self, mini_batches):\n ## Sort mini_batches according to the max sequence length.\n mini_batches = sorted(mini_batches, key=lambda x: x[1])\n mini_batches.insert(0, None) # Sentinel.\n\n tmp = 0\n mini_batch_sum = [0]\n for mini_batch in mini_batches[1:]:\n tmp += len(mini_batch[0])\n mini_batch_sum.append(tmp)\n\n ## DP.\n state_2nd_stage = []\n split_idx_list = []\n state_2nd_stage.append(0)\n split_idx_list.append(0)\n\n for i in range(1, len(mini_batches)):\n min_cost = np.Inf # INF\n split_idx = 0\n for j in range(1, i+1):\n total_samples = mini_batch_sum[i] - mini_batch_sum[j-1]\n tmp = state_2nd_stage[j-1] + self.alpha.get_latency(total_samples, mini_batches[i][1])\n if tmp < min_cost:\n min_cost = tmp\n split_idx = j - 1\n split_idx_list.append(split_idx)\n state_2nd_stage.append(min_cost)\n\n ## Split mini_batches into final scheduled_batches.\n ### Split mini_batches into macro_batches.\n\n end_idx = len(mini_batches) - 1\n macro_batches = []\n while(end_idx > 0):\n start_idx = split_idx_list[end_idx] + 1\n macro_batch = []\n max_len = mini_batches[end_idx][1]\n for j in range(start_idx, end_idx + 1):\n macro_batch.append(mini_batches[j]) \n macro_batches.append((macro_batch, max_len))\n end_idx = split_idx_list[end_idx] \n\n total_samples = 0\n for macro_batch in macro_batches:\n for mini_batch in macro_batch[0]:\n total_samples += len(mini_batch[0])\n # print(total_samples)\n\n return macro_batches\n \n # @calculate_time(show=True, min_cost_ms=0.1)\n def _can_add_new_req(self, req, lora_ranks):\n self.cache_len_list.append((req.input_len + 1, req.max_output_len - 1)) # hard to analysis\n self.cache_len_list.sort(key=lambda x: -x[1])\n if req.adapter_dir not in self.adapters:\n self.adapter_size += lora_ranks[req.adapter_dir] * 4\n self.adapters.add(req.adapter_dir)\n \n left_out_len_array = np.array([e[1] for e in self.cache_len_list])\n # assert left_out_len_array.min() >= 0\n has_run_len_array = np.array([e[0] for e in self.cache_len_list])\n cum_run_len_array = np.cumsum(has_run_len_array)\n size_array = np.arange(1, len(self.cache_len_list) + 1, 1)\n \n need_max_token_num = (left_out_len_array * size_array + cum_run_len_array).max()\n if (need_max_token_num < self.max_total_tokens - self.adapter_size and\n len(self.cache_len_list) <= self.running_max_req_size):\n return True\n else:\n return False\n\n def generate_new_batch(self, current_batch:Batch, lora_ranks: dict[str, int]):\n if current_batch is not None and len(current_batch.reqs) >= self.running_max_req_size:\n return None\n \n reqs = self.waiting_req_list\n # when waiting_reqs > 20\n if len(self.waiting_req_list) > 10:\n mini_batches = self.intra_task_batching(lora_ranks)\n macro_batches = self.inter_task_batching(mini_batches)\n \n macro_batch = macro_batches[-1][0]\n reqs = [req for minibatch in macro_batch for req in minibatch[0]]\n \n \n 
self._init_cache_list(current_batch, lora_ranks)\n can_run_list = []\n abort_list = []\n new_batch_total_tokens = 0\n aborted_count = 0\n for req in reqs:\n if req.aborted:\n aborted_count += 1\n abort_list.append(req)\n continue\n if (self._can_add_new_req(req, lora_ranks) and\n new_batch_total_tokens + req.input_len <= self.batch_max_tokens):\n can_run_list.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n\n if len(can_run_list) != 0:\n new_batch = Batch(uuid.uuid4().hex, can_run_list)\n self.waiting_req_list = [req for req in self.waiting_req_list if req not in can_run_list and req not in abort_list]\n return new_batch\n else:\n return None\n\n\n def next_batch(self):\n next_batch = []\n new_batch_total_tokens = 0\n for req in self.waiting_req_list:\n if req.aborted:\n continue\n if new_batch_total_tokens + req.input_len <= self.batch_max_tokens:\n next_batch.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n if len(next_batch) > 0:\n next_batch = Batch(uuid.uuid4().hex, next_batch)\n return next_batch\n else:\n return None" }, { "identifier": "PEFTReqQueue", "path": "multi_loras/slora/router/peft_req_queue.py", "snippet": "class PEFTReqQueue(ReqQueue):\n\n def __init__(self, max_total_tokens, batch_max_tokens, running_max_req_size) -> None:\n super().__init__(max_total_tokens, batch_max_tokens, running_max_req_size)\n \n def append(self, req):\n self.waiting_req_list.append(req)\n return\n \n def _init_cache_list(self, current_batch:Batch, lora_ranks):\n if current_batch is not None:\n self.cache_len_list = []\n self.adapters = set()\n self.adapter_size = 0\n for req in current_batch.reqs:\n self.cache_len_list.append((req.input_len + len(req.output_ids),\n req.max_output_len - len(req.output_ids) - 1))\n if req.adapter_dir not in self.adapters:\n self.adapter_size += lora_ranks[req.adapter_dir] * 4\n self.adapters.add(req.adapter_dir)\n else:\n self.cache_len_list = []\n self.adapters = set()\n self.adapter_size = 0\n \n # @calculate_time(show=True, min_cost_ms=0.1)\n def _can_add_new_req(self, req, lora_ranks):\n self.cache_len_list.append((req.input_len + 1, req.max_output_len - 1)) # hard to analysis\n self.cache_len_list.sort(key=lambda x: -x[1])\n if req.adapter_dir not in self.adapters:\n self.adapter_size += lora_ranks[req.adapter_dir] * 4\n self.adapters.add(req.adapter_dir)\n \n left_out_len_array = np.array([e[1] for e in self.cache_len_list])\n # assert left_out_len_array.min() >= 0\n has_run_len_array = np.array([e[0] for e in self.cache_len_list])\n cum_run_len_array = np.cumsum(has_run_len_array)\n size_array = np.arange(1, len(self.cache_len_list) + 1, 1)\n \n need_max_token_num = (left_out_len_array * size_array + cum_run_len_array).max()\n if (need_max_token_num < self.max_total_tokens - self.adapter_size and\n len(self.cache_len_list) <= self.running_max_req_size):\n return True\n else:\n return False\n\n def generate_new_batch(self, current_batch:Batch, lora_ranks: dict[str, int]):\n if current_batch is not None and len(current_batch.reqs) >= self.running_max_req_size:\n return None\n \n self._init_cache_list(current_batch, lora_ranks)\n can_run_list = []\n abort_list = []\n new_batch_total_tokens = 0\n aborted_count = 0\n if len(self.waiting_req_list) > 0 and current_batch is None:\n adapter_dir = self.waiting_req_list[0].adapter_dir\n if current_batch is not None:\n adapter_dir = current_batch.reqs[0].adapter_dir\n # heuristics:\n # TODO: think more\n max_other_waited_reqs = 30\n other_waited_reqs = 0\n for req in 
self.waiting_req_list:\n if req.adapter_dir != adapter_dir:\n other_waited_reqs += 1\n if other_waited_reqs > max_other_waited_reqs:\n return None\n continue\n if req.adapter_dir == adapter_dir:\n break\n\n for req in self.waiting_req_list:\n if req.adapter_dir != adapter_dir:\n continue\n if req.aborted:\n aborted_count += 1\n continue\n if (self._can_add_new_req(req, lora_ranks) and\n new_batch_total_tokens + req.input_len <= self.batch_max_tokens):\n can_run_list.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n\n if len(can_run_list) != 0:\n new_batch = Batch(uuid.uuid4().hex, can_run_list)\n self.waiting_req_list = [req for req in self.waiting_req_list if req not in can_run_list and req not in abort_list]\n return new_batch\n else:\n return None\n\n\n def next_batch(self):\n next_batch = []\n new_batch_total_tokens = 0\n for req in self.waiting_req_list:\n if req.aborted:\n continue\n if new_batch_total_tokens + req.input_len <= self.batch_max_tokens:\n next_batch.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n if len(next_batch) > 0:\n next_batch = Batch(uuid.uuid4().hex, next_batch)\n return next_batch\n else:\n return None" }, { "identifier": "ClusterReqQueue", "path": "multi_loras/slora/router/cluster_req_queue.py", "snippet": "class ClusterReqQueue(ReqQueue):\n\n def __init__(self, max_total_tokens, batch_max_tokens, running_max_req_size, batch_num_adapters) -> None:\n super().__init__(max_total_tokens, batch_max_tokens, running_max_req_size)\n self.batch_num_adapters = batch_num_adapters\n\n def _generate_new_batch_prioritizing_existing_adapters(self, current_batch:Batch, lora_ranks: dict[str, int]):\n filtered_waiting_req_list = list(filter(lambda req: req.adapter_dir in current_batch.adapter_dirs, self.waiting_req_list))\n request_ids_to_remove_from_waiting_queue = set()\n can_run_list = []\n new_batch_total_tokens = 0\n for idx, req in enumerate(filtered_waiting_req_list):\n if req.aborted:\n request_ids_to_remove_from_waiting_queue.add(req.request_id)\n continue\n if (self._can_add_new_req(req, lora_ranks) and\n new_batch_total_tokens + req.input_len <= self.batch_max_tokens):\n can_run_list.append(req)\n new_batch_total_tokens += req.input_len\n request_ids_to_remove_from_waiting_queue.add(req.request_id)\n else:\n break\n \n self.waiting_req_list = list(filter(lambda req: req.request_id not in request_ids_to_remove_from_waiting_queue, self.waiting_req_list))\n request_ids_to_remove_from_waiting_queue = set()\n\n # If filtered waiting list was not enough to max-out the current running batch, we resolve to FIFO\n for req in self.waiting_req_list:\n if req.aborted:\n request_ids_to_remove_from_waiting_queue.add(req.request_id)\n continue\n\n if (self._can_add_new_req(req, lora_ranks) and\n new_batch_total_tokens + req.input_len <= self.batch_max_tokens):\n can_run_list.append(req)\n new_batch_total_tokens += req.input_len\n request_ids_to_remove_from_waiting_queue.add(req.request_id)\n else:\n break\n \n self.waiting_req_list = list(filter(lambda req: req.request_id not in request_ids_to_remove_from_waiting_queue, self.waiting_req_list))\n\n return can_run_list\n\n def generate_new_batch(self, current_batch:Batch, lora_ranks: dict[str, int]):\n if current_batch is not None and len(current_batch.reqs) >= self.running_max_req_size:\n return None\n \n self._init_cache_list(current_batch, lora_ranks)\n can_run_list = []\n new_batch_total_tokens = 0\n aborted_count = 0\n\n for req in self.waiting_req_list:\n if req.aborted:\n 
aborted_count += 1\n continue\n\n if current_batch is not None and len(current_batch.adapter_dirs) >= self.batch_num_adapters:\n self.waiting_req_list = self.waiting_req_list[len(can_run_list) + aborted_count:]\n rest_of_batch = self._generate_new_batch_prioritizing_existing_adapters(current_batch, lora_ranks)\n can_run_list += rest_of_batch\n if len(can_run_list) != 0:\n return Batch(uuid.uuid4().hex, can_run_list)\n else:\n return None\n\n if (self._can_add_new_req(req, lora_ranks) and\n new_batch_total_tokens + req.input_len <= self.batch_max_tokens):\n can_run_list.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n\n if len(can_run_list) != 0:\n new_batch = Batch(uuid.uuid4().hex, can_run_list)\n self.waiting_req_list = self.waiting_req_list[len(can_run_list) + aborted_count:]\n return new_batch\n else:\n return None" }, { "identifier": "AbortReqQueue", "path": "multi_loras/slora/router/abort_req_queue.py", "snippet": "class AbortReqQueue(ReqQueue):\n\n def __init__(self, max_total_tokens, batch_max_tokens, running_max_req_size) -> None:\n super().__init__(max_total_tokens, batch_max_tokens, running_max_req_size)\n self.abort_req_list: List[str] = []\n self.req_time_stamp = []\n self.init_bs = 1\n self.apprx_req_rate = 1\n self.apprx_bs = self.init_bs\n self.last_req_num = 0\n self.last_time = time.time()\n \n def append(self, req):\n self.waiting_req_list.insert(0, req)\n self.req_time_stamp.insert(0, time.time())\n assert len(self.waiting_req_list) == len(self.req_time_stamp)\n return\n\n def reset_abort_list(self):\n self.abort_req_list = []\n\n def generate_new_batch(self, current_batch:Batch, lora_ranks: dict[str, int]):\n if current_batch is not None and len(current_batch.reqs) >= self.running_max_req_size:\n return None\n \n self._init_cache_list(current_batch, lora_ranks)\n can_run_list = []\n abort_list = []\n new_batch_total_tokens = 0\n aborted_count = 0\n\n self.apprx_req_rate = int(0.7 * (len(self.waiting_req_list) - self.last_req_num) + 0.3 * self.apprx_req_rate)\n for i, req in enumerate(self.waiting_req_list):\n if attainment_func(time.time() - self.req_time_stamp[i] + 0.5) == 0:\n req.aborted = True\n aborted_count += 1\n abort_list.append(req)\n self.abort_req_list.append(req.request_id)\n self.req_time_stamp = [self.req_time_stamp[i] for i in range(len(self.req_time_stamp)) if self.waiting_req_list[i] not in abort_list]\n self.waiting_req_list = [req for req in self.waiting_req_list if req not in abort_list]\n \n if self.apprx_req_rate >= self.apprx_bs:\n print(\"apprx bs\", self.apprx_bs, \"req rate\", self.apprx_req_rate)\n # choose from the latest requests\n for req in self.waiting_req_list:\n if req.aborted:\n aborted_count += 1\n abort_list.append(req)\n continue\n if (self._can_add_new_req(req, lora_ranks) and\n new_batch_total_tokens + req.input_len <= self.batch_max_tokens):\n can_run_list.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n elif self.apprx_req_rate < self.apprx_bs:\n # choose from the earliest requests\n for req in reversed(self.waiting_req_list):\n if req.aborted:\n aborted_count += 1\n abort_list.append(req)\n continue\n if (self._can_add_new_req(req, lora_ranks) and\n new_batch_total_tokens + req.input_len <= self.batch_max_tokens):\n can_run_list.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n \n if len(can_run_list) != 0:\n new_batch = Batch(uuid.uuid4().hex, can_run_list)\n self.req_time_stamp = [self.req_time_stamp[i] for i in range(len(self.req_time_stamp)) if 
self.waiting_req_list[i] not in can_run_list and self.waiting_req_list[i] not in abort_list]\n self.waiting_req_list = [req for req in self.waiting_req_list if req not in can_run_list and req not in abort_list]\n self.last_req_num = len(self.waiting_req_list)\n self.apprx_bs = max(int(0.7 * len(new_batch.reqs) + 0.3 * self.apprx_bs), self.init_bs)\n return new_batch\n else:\n return None" }, { "identifier": "get_lora_config", "path": "multi_loras/slora/models/peft/lora_adapter.py", "snippet": "def get_lora_config(lora_dir, dummy):\n if dummy:\n return get_lora_config_json(lora_dir), lora_dir\n else:\n lora_dir = re.sub(r'-(\\d+)$', '', lora_dir)\n return hf_load_config(lora_dir)" } ]
import uvloop
import asyncio
import os
import pickle
import time
import torch
import zmq
import zmq.asyncio
import traceback
from typing import Dict, List, Optional
from rpyc.utils.classic import obtain
from slora.utils.infer_utils import calculate_time
from ..sampling_params import SamplingParams
from ..io_struct import Req, Batch, BatchAbortReq, BatchTokenIdOut, AbortReq
from .input_params import InputParams
from .model_infer.model_rpc import start_model_process, ModelRpcClient
from .req_queue import ReqQueue
from .stats import Stats
from .profiler import AlphaModel, BetaModel
from .pets_req_queue import PETSReqQueue
from .peft_req_queue import PEFTReqQueue
from .cluster_req_queue import ClusterReqQueue
from .abort_req_queue import AbortReqQueue
from ..models.peft.lora_adapter import get_lora_config
13,943
for tp_rank in range(self.world_size): ret.append(self.model_rpcs[tp_rank].load_adapters(new_mini_batch.adapter_dirs)) await asyncio.gather(*ret) await self._prefill_batch(new_mini_batch, minibatch=True) if not new_mini_batch.is_clear(): await self._merge_batch(self.running_batch, new_mini_batch) self.running_batch.merge(new_mini_batch) self.has_wait_tokens = 0 else: self.stats_tool.count_output_tokens(self.running_batch) await self._decode_batch(self.running_batch) await self._filter_runing_batch() async def _init_batch(self, batch: Batch): reqs = [r.to_rpc_obj() for r in batch.reqs] rets = [self.model_rpcs[tp_rank].init_batch(batch.batch_id, reqs) for tp_rank in range(self.world_size)] await asyncio.gather(*rets) return async def _prefill_batch(self, batch, minibatch=True): await self._init_batch(batch) rets = [self.model_rpcs[tp_rank].prefill_batch(batch.batch_id) for tp_rank in range(self.world_size)] ans = await asyncio.gather(*rets) if self.world_size != 1: req_to_out_token_id = obtain(ans[0]) else: req_to_out_token_id = ans[0] self._add_token_id_to_req(batch, req_to_out_token_id) has_new_finished_req = batch.mark_finished_req(self.eos_id) self._send_to_detokenization_proc(batch, req_to_out_token_id) await self._handle_finish_req(batch, has_new_finished_req, minibatch=True) return async def _decode_batch(self, batch:Batch): rets = [self.model_rpcs[tp_rank].decode_batch(batch.batch_id) for tp_rank in range(self.world_size)] ans = await asyncio.gather(*rets) if self.world_size != 1: req_to_out_token_id = obtain(ans[0]) else: req_to_out_token_id = ans[0] self._add_token_id_to_req(batch, req_to_out_token_id) has_new_finished_req = batch.mark_finished_req(self.eos_id) self._send_to_detokenization_proc(batch, req_to_out_token_id) await self._handle_finish_req(batch, has_new_finished_req) return async def _filter_batch(self, batch: Batch): req_id_list = [r.request_id for r in batch.reqs] rets = [self.model_rpcs[tp_rank].filter_batch(batch.batch_id, req_id_list) for tp_rank in range(self.world_size)] await asyncio.gather(*rets) return async def _merge_batch(self, batch1, batch2): rets = [self.model_rpcs[tp_rank].merge_batch(batch1.batch_id, batch2.batch_id) for tp_rank in range(self.world_size)] await asyncio.gather(*rets) return async def _remove_batch(self, batch): rets = [self.model_rpcs[tp_rank].remove_batch(batch.batch_id) for tp_rank in range(self.world_size)] await asyncio.gather(*rets) return async def _handle_finish_req(self, batch: Batch, has_new_finished_req, minibatch=False): if has_new_finished_req: batch.filter_finished() # unmerge adapter from base model if self.input_params.scheduler == "peft" and batch.is_clear(): ret = [] for tp_rank in range(self.world_size): ret.append(self.model_rpcs[tp_rank].unmerge_adapter()) await asyncio.gather(*ret) if not minibatch: ret = [] for tp_rank in range(self.world_size): ret.append(self.model_rpcs[tp_rank].offload_adapters(batch.adapter_dirs)) await asyncio.gather(*ret) if batch.is_clear(): await self._remove_batch(batch) else: await self._filter_batch(batch) return async def _filter_runing_batch(self): if self.running_batch is not None and self.running_batch.is_clear(): # offload model and adapters ret = [] for tp_rank in range(self.world_size): ret.append(self.model_rpcs[tp_rank].offload_adapters()) await asyncio.gather(*ret) self.running_batch = None return def _add_token_id_to_req(self, batch: Batch, req_ans): for req_id, (new_token_id, new_gen_metadata) in req_ans.items(): req = batch.id_to_reqs[req_id] 
req.output_ids.append(new_token_id) req.output_metadata_list.append(new_gen_metadata) return def _send_to_detokenization_proc(self, batch: Batch, req_ans): batch_out = BatchTokenIdOut() for req_id, (new_token_id, new_gen_metadata) in req_ans.items(): req = batch.id_to_reqs[req_id] batch_out.reqs_infs.append((req_id, new_token_id, new_gen_metadata, req.has_generate_finished, req.aborted)) self.send_to_detokenization.send_pyobj(batch_out) return async def loop_for_netio_req(self): while True: recv_req = await self.recv_from_httpserver.recv_pyobj() if isinstance(recv_req, tuple) and len(recv_req) == 4: adapter_dir, prompt_ids, sampling_params, request_id = recv_req self.add_req(adapter_dir, prompt_ids, sampling_params, request_id)
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy()) class RouterManager: def __init__(self, weightdir, adapter_dirs, load_way, world_size, eos_id, router_port, detokenization_port, model_rpc_ports, input_params, mode=[], log_stats=True, log_stats_interval=10): self.model_weightdir = weightdir self.adapter_dirs = adapter_dirs self.world_size = world_size self.load_way = load_way self.mode = mode self.input_params = input_params if self.input_params.prefetch: self.prefetch_stream = torch.cuda.Stream() else: self.prefetch_stream = None # get adapter rank self.lora_ranks = {} for lora_dir in adapter_dirs: config, _ = get_lora_config(lora_dir, input_params.dummy) self.lora_ranks[lora_dir] = config["r"] self.lora_ranks[None] = 0 if input_params.scheduler == "pets": self.req_queue = PETSReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) elif input_params.scheduler == "peft": self.req_queue = PEFTReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) elif input_params.batch_num_adapters is not None: self.req_queue = ClusterReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size, input_params.batch_num_adapters) elif input_params.enable_abort: self.req_queue = AbortReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) else: self.req_queue = ReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) self.running_batch: Batch = None self.eos_id = eos_id self.has_wait_tokens = 0 self.max_wait_tokens = 10 context = zmq.asyncio.Context(2) self.recv_from_httpserver = context.socket(zmq.PULL) self.recv_from_httpserver.bind(f"tcp://127.0.0.1:{router_port}") self.send_to_detokenization = context.socket(zmq.PUSH) self.send_to_detokenization.connect(f"tcp://127.0.0.1:{detokenization_port}") self.model_rpc_ports = model_rpc_ports self.stats_tool = Stats(log_stats, log_stats_interval) async def wait_to_model_ready(self): self.model_rpcs: List[ModelRpcClient] = [] for rank_id in range(self.world_size): rpc_model = await start_model_process(port=self.model_rpc_ports[rank_id], world_size=self.world_size) self.model_rpcs.append(rpc_model) init_model_ret = [] for rank_id in range(self.world_size): # async init model process init_model_ret.append( self.model_rpcs[rank_id].init_model( rank_id, self.world_size, self.model_weightdir, self.adapter_dirs, self.input_params.max_total_token_num, self.load_way, self.mode, input_params=self.input_params, prefetch_stream=self.prefetch_stream, )) await asyncio.gather(*init_model_ret) return async def profile_prefill(self): res = [] for rank_id in range(self.world_size): # async init model process res.append( self.model_rpcs[rank_id].profile_prefill()) results = await asyncio.gather(*res) self.alpha_model = AlphaModel(results[0]) self.beta_model = BetaModel(results[0]) # check if the path exists else create it cache_dir = os.path.expanduser("~/.cache/slora") if not os.path.exists(cache_dir): os.makedirs(cache_dir) with open(cache_dir+"/profile_results.pkl", "wb") as f: pickle.dump(results[0], f) return def add_req( self, adapter_dir: str, prompt_ids: List[int], sampling_params: SamplingParams, request_id: str ): req = Req(adapter_dir, request_id, prompt_ids, sampling_params) self.req_queue.append(req) self.send_to_detokenization.send_pyobj(req.to_req_detokenization_state()) return async def abort(self, 
request_id): if self.running_batch is not None: for req in self.running_batch.reqs: if req.request_id == request_id: req.has_generate_finished = True req.aborted = True for req in self.req_queue.waiting_req_list: if req.request_id == request_id: req.has_generate_finished = True req.aborted = True return async def loop_for_fwd(self,): counter_count = 0 while True: await self._step() counter_count += 1 if self.running_batch is not None: if counter_count % 50 == 0: print("current batch size:", len(self.running_batch.reqs), "token used ratio:", self.running_batch.calcu_used_tokens() / self.input_params.max_total_token_num) pass self.stats_tool.print_stats() if self.running_batch is None: await asyncio.sleep(0.01) # 10ms async def _step(self): """ 事件处理循环 """ # 删除所有已经 finished 的 req if self.running_batch is None: new_batch = self.req_queue.generate_new_batch(self.running_batch, self.lora_ranks) if self.input_params.enable_abort and len(self.req_queue.abort_req_list) > 0: self.send_to_detokenization.send_pyobj(BatchAbortReq(self.req_queue.abort_req_list)) self.req_queue.reset_abort_list() if new_batch is not None: self.stats_tool.count_prompt_tokens(new_batch) self.running_batch = new_batch # load adapters ret = [] for tp_rank in range(self.world_size): ret.append(self.model_rpcs[tp_rank].load_adapters(new_batch.adapter_dirs)) await asyncio.gather(*ret) # merge adapter to base model if self.input_params.scheduler == "peft": torch.cuda.synchronize() ret = [] for tp_rank in range(self.world_size): ret.append(self.model_rpcs[tp_rank].merge_adapter()) await asyncio.gather(*ret) torch.cuda.synchronize() await self._prefill_batch(self.running_batch) await self._filter_runing_batch() self.has_wait_tokens = 0 return if self.has_wait_tokens < self.max_wait_tokens: self.stats_tool.count_output_tokens(self.running_batch) # prefetch if (self.input_params.prefetch and (self.has_wait_tokens == self.max_wait_tokens // 2 or self.has_wait_tokens == self.max_wait_tokens - 3) and self.input_params.scheduler != "peft"): next_batch = self.req_queue.next_batch() if next_batch is not None: ret = [] for tp_rank in range(self.world_size): ret.append(self.model_rpcs[tp_rank].load_adapters( next_batch.adapter_dirs, prefetch=True)) await asyncio.gather(*ret) await self._decode_batch(self.running_batch) await self._filter_runing_batch() self.has_wait_tokens += 1 return else: new_mini_batch = self.req_queue.generate_new_batch(self.running_batch, self.lora_ranks) if self.input_params.enable_abort and len(self.req_queue.abort_req_list) > 0: self.send_to_detokenization.send_pyobj(BatchAbortReq(self.req_queue.abort_req_list)) self.req_queue.reset_abort_list() if new_mini_batch is not None: self.stats_tool.count_prompt_tokens(new_mini_batch) ret = [] for tp_rank in range(self.world_size): ret.append(self.model_rpcs[tp_rank].load_adapters(new_mini_batch.adapter_dirs)) await asyncio.gather(*ret) await self._prefill_batch(new_mini_batch, minibatch=True) if not new_mini_batch.is_clear(): await self._merge_batch(self.running_batch, new_mini_batch) self.running_batch.merge(new_mini_batch) self.has_wait_tokens = 0 else: self.stats_tool.count_output_tokens(self.running_batch) await self._decode_batch(self.running_batch) await self._filter_runing_batch() async def _init_batch(self, batch: Batch): reqs = [r.to_rpc_obj() for r in batch.reqs] rets = [self.model_rpcs[tp_rank].init_batch(batch.batch_id, reqs) for tp_rank in range(self.world_size)] await asyncio.gather(*rets) return async def _prefill_batch(self, batch, minibatch=True): await 
self._init_batch(batch) rets = [self.model_rpcs[tp_rank].prefill_batch(batch.batch_id) for tp_rank in range(self.world_size)] ans = await asyncio.gather(*rets) if self.world_size != 1: req_to_out_token_id = obtain(ans[0]) else: req_to_out_token_id = ans[0] self._add_token_id_to_req(batch, req_to_out_token_id) has_new_finished_req = batch.mark_finished_req(self.eos_id) self._send_to_detokenization_proc(batch, req_to_out_token_id) await self._handle_finish_req(batch, has_new_finished_req, minibatch=True) return async def _decode_batch(self, batch:Batch): rets = [self.model_rpcs[tp_rank].decode_batch(batch.batch_id) for tp_rank in range(self.world_size)] ans = await asyncio.gather(*rets) if self.world_size != 1: req_to_out_token_id = obtain(ans[0]) else: req_to_out_token_id = ans[0] self._add_token_id_to_req(batch, req_to_out_token_id) has_new_finished_req = batch.mark_finished_req(self.eos_id) self._send_to_detokenization_proc(batch, req_to_out_token_id) await self._handle_finish_req(batch, has_new_finished_req) return async def _filter_batch(self, batch: Batch): req_id_list = [r.request_id for r in batch.reqs] rets = [self.model_rpcs[tp_rank].filter_batch(batch.batch_id, req_id_list) for tp_rank in range(self.world_size)] await asyncio.gather(*rets) return async def _merge_batch(self, batch1, batch2): rets = [self.model_rpcs[tp_rank].merge_batch(batch1.batch_id, batch2.batch_id) for tp_rank in range(self.world_size)] await asyncio.gather(*rets) return async def _remove_batch(self, batch): rets = [self.model_rpcs[tp_rank].remove_batch(batch.batch_id) for tp_rank in range(self.world_size)] await asyncio.gather(*rets) return async def _handle_finish_req(self, batch: Batch, has_new_finished_req, minibatch=False): if has_new_finished_req: batch.filter_finished() # unmerge adapter from base model if self.input_params.scheduler == "peft" and batch.is_clear(): ret = [] for tp_rank in range(self.world_size): ret.append(self.model_rpcs[tp_rank].unmerge_adapter()) await asyncio.gather(*ret) if not minibatch: ret = [] for tp_rank in range(self.world_size): ret.append(self.model_rpcs[tp_rank].offload_adapters(batch.adapter_dirs)) await asyncio.gather(*ret) if batch.is_clear(): await self._remove_batch(batch) else: await self._filter_batch(batch) return async def _filter_runing_batch(self): if self.running_batch is not None and self.running_batch.is_clear(): # offload model and adapters ret = [] for tp_rank in range(self.world_size): ret.append(self.model_rpcs[tp_rank].offload_adapters()) await asyncio.gather(*ret) self.running_batch = None return def _add_token_id_to_req(self, batch: Batch, req_ans): for req_id, (new_token_id, new_gen_metadata) in req_ans.items(): req = batch.id_to_reqs[req_id] req.output_ids.append(new_token_id) req.output_metadata_list.append(new_gen_metadata) return def _send_to_detokenization_proc(self, batch: Batch, req_ans): batch_out = BatchTokenIdOut() for req_id, (new_token_id, new_gen_metadata) in req_ans.items(): req = batch.id_to_reqs[req_id] batch_out.reqs_infs.append((req_id, new_token_id, new_gen_metadata, req.has_generate_finished, req.aborted)) self.send_to_detokenization.send_pyobj(batch_out) return async def loop_for_netio_req(self): while True: recv_req = await self.recv_from_httpserver.recv_pyobj() if isinstance(recv_req, tuple) and len(recv_req) == 4: adapter_dir, prompt_ids, sampling_params, request_id = recv_req self.add_req(adapter_dir, prompt_ids, sampling_params, request_id)
elif isinstance(recv_req, AbortReq):
5
2023-10-16 02:39:47+00:00
16k
MobileLLM/AutoDroid
droidbot/input_manager.py
[ { "identifier": "EventLog", "path": "droidbot/input_event.py", "snippet": "class EventLog(object):\n \"\"\"\n save an event to local file system\n \"\"\"\n\n def __init__(self, device, app, event, profiling_method=None, tag=None):\n self.device = device\n self.app = app\n self.event = event\n if tag is None:\n from datetime import datetime\n tag = datetime.now().strftime(\"%Y-%m-%d_%H%M%S\")\n self.tag = tag\n\n self.from_state = None\n self.to_state = None\n self.event_str = None\n\n self.profiling_method = profiling_method\n self.trace_remote_file = \"/data/local/tmp/event.trace\"\n self.is_profiling = False\n self.profiling_pid = -1\n self.sampling = None\n # sampling feature was added in Android 5.0 (API level 21)\n if profiling_method is not None and \\\n str(profiling_method) != \"full\" and \\\n self.device.get_sdk_version() >= 21:\n self.sampling = int(profiling_method)\n\n def to_dict(self):\n return {\n \"tag\": self.tag,\n \"event\": self.event.to_dict(),\n \"start_state\": self.from_state.state_str,\n \"stop_state\": self.to_state.state_str,\n \"event_str\": self.event_str\n }\n\n def save2dir(self, output_dir=None):\n # Save event\n if output_dir is None:\n if self.device.output_dir is None:\n return\n else:\n output_dir = os.path.join(self.device.output_dir, \"events\")\n try:\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n event_json_file_path = \"%s/event_%s.json\" % (output_dir, self.tag)\n event_json_file = open(event_json_file_path, \"w\")\n json.dump(self.to_dict(), event_json_file, indent=2)\n event_json_file.close()\n except Exception as e:\n self.device.logger.warning(\"Saving event to dir failed.\")\n self.device.logger.warning(e)\n\n def save_views(self, output_dir=None):\n # Save views\n views = self.event.get_views()\n if views:\n for view_dict in views:\n self.from_state.save_view_img(view_dict=view_dict, output_dir=output_dir)\n\n def is_start_event(self):\n if isinstance(self.event, IntentEvent):\n intent_cmd = self.event.intent\n if \"start\" in intent_cmd and self.app.get_package_name() in intent_cmd:\n return True\n return False\n\n def start(self):\n \"\"\"\n start sending event\n \"\"\"\n self.from_state = self.device.get_current_state()\n self.start_profiling()\n self.event_str = self.event.get_event_str(self.from_state)\n print(\"Action: %s\" % self.event_str)\n self.device.send_event(self.event)\n\n def start_profiling(self):\n \"\"\"\n start profiling the current event\n @return:\n \"\"\"\n if self.profiling_method is None:\n return\n if self.is_profiling:\n return\n pid = self.device.get_app_pid(self.app)\n if pid is None:\n if self.is_start_event():\n start_intent = self.app.get_start_with_profiling_intent(self.trace_remote_file, self.sampling)\n self.event.intent = start_intent.get_cmd()\n self.is_profiling = True\n return\n if self.sampling is not None:\n self.device.adb.shell(\n [\"am\", \"profile\", \"start\", \"--sampling\", str(self.sampling), str(pid), self.trace_remote_file])\n else:\n self.device.adb.shell([\"am\", \"profile\", \"start\", str(pid), self.trace_remote_file])\n self.is_profiling = True\n self.profiling_pid = pid\n\n def stop(self):\n \"\"\"\n finish sending event\n \"\"\"\n self.stop_profiling()\n self.to_state = self.device.get_current_state()\n self.save2dir()\n self.save_views()\n\n def stop_profiling(self, output_dir=None):\n if self.profiling_method is None:\n return\n if not self.is_profiling:\n return\n try:\n if self.profiling_pid == -1:\n pid = self.device.get_app_pid(self.app)\n if pid is None:\n 
return\n self.profiling_pid = pid\n\n self.device.adb.shell([\"am\", \"profile\", \"stop\", str(self.profiling_pid)])\n if self.sampling is None:\n time.sleep(3) # guess this time can vary between machines\n\n if output_dir is None:\n if self.device.output_dir is None:\n return\n else:\n output_dir = os.path.join(self.device.output_dir, \"events\")\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n event_trace_local_path = \"%s/event_trace_%s.trace\" % (output_dir, self.tag)\n self.device.pull_file(self.trace_remote_file, event_trace_local_path)\n\n except Exception as e:\n self.device.logger.warning(\"profiling event failed\")\n self.device.logger.warning(e)" }, { "identifier": "UtgBasedInputPolicy", "path": "droidbot/input_policy.py", "snippet": "class UtgBasedInputPolicy(InputPolicy):\n \"\"\"\n state-based input policy\n \"\"\"\n\n def __init__(self, device, app, random_input):\n super(UtgBasedInputPolicy, self).__init__(device, app)\n self.random_input = random_input\n self.script = None\n self.master = None\n self.script_events = []\n self.last_event = None\n self.last_state = None\n self.current_state = None\n self.utg = UTG(device=device, app=app, random_input=random_input)\n self.script_event_idx = 0\n if self.device.humanoid is not None:\n self.humanoid_view_trees = []\n self.humanoid_events = []\n\n def generate_event(self, input_manager):\n \"\"\"\n generate an event\n @return:\n \"\"\"\n\n # Get current device state\n self.current_state = self.device.get_current_state()\n if self.current_state is None:\n import time\n time.sleep(5)\n return KeyEvent(name=\"BACK\")\n\n self.__update_utg()\n\n # update last view trees for humanoid\n if self.device.humanoid is not None:\n self.humanoid_view_trees = self.humanoid_view_trees + [self.current_state.view_tree]\n if len(self.humanoid_view_trees) > 4:\n self.humanoid_view_trees = self.humanoid_view_trees[1:]\n\n event = None\n\n # if the previous operation is not finished, continue\n if len(self.script_events) > self.script_event_idx:\n event = self.script_events[self.script_event_idx].get_transformed_event(self)\n self.script_event_idx += 1\n\n # First try matching a state defined in the script\n if event is None and self.script is not None:\n operation = self.script.get_operation_based_on_state(self.current_state)\n if operation is not None:\n self.script_events = operation.events\n # restart script\n event = self.script_events[0].get_transformed_event(self)\n self.script_event_idx = 1\n\n if event is None:\n old_state, event = self.generate_event_based_on_utg(input_manager)\n import time\n time.sleep(3)\n # update last events for humanoid\n if self.device.humanoid is not None:\n self.humanoid_events = self.humanoid_events + [event]\n if len(self.humanoid_events) > 3:\n self.humanoid_events = self.humanoid_events[1:]\n\n self.last_state = self.current_state if old_state is None else old_state\n self.last_event = event\n return event\n\n def __update_utg(self):\n self.utg.add_transition(self.last_event, self.last_state, self.current_state)\n\n @abstractmethod\n def generate_event_based_on_utg(self, input_manager):\n \"\"\"\n generate an event based on UTG\n :return: InputEvent\n \"\"\"\n pass" }, { "identifier": "UtgNaiveSearchPolicy", "path": "droidbot/input_policy.py", "snippet": "class UtgNaiveSearchPolicy(UtgBasedInputPolicy):\n \"\"\"\n depth-first strategy to explore UFG (old)\n \"\"\"\n\n def __init__(self, device, app, random_input, search_method):\n super(UtgNaiveSearchPolicy, self).__init__(device, app, 
random_input)\n self.logger = logging.getLogger(self.__class__.__name__)\n\n self.explored_views = set()\n self.state_transitions = set()\n self.search_method = search_method\n\n self.last_event_flag = \"\"\n self.last_event_str = None\n self.last_state = None\n\n self.preferred_buttons = [\"yes\", \"ok\", \"activate\", \"detail\", \"more\", \"access\",\n \"allow\", \"check\", \"agree\", \"try\", \"go\", \"next\"]\n\n def generate_event_based_on_utg(self):\n \"\"\"\n generate an event based on current device state\n note: ensure these fields are properly maintained in each transaction:\n last_event_flag, last_touched_view, last_state, exploited_views, state_transitions\n @return: InputEvent\n \"\"\"\n self.save_state_transition(self.last_event_str, self.last_state, self.current_state)\n\n if self.device.is_foreground(self.app):\n # the app is in foreground, clear last_event_flag\n self.last_event_flag = EVENT_FLAG_STARTED\n else:\n number_of_starts = self.last_event_flag.count(EVENT_FLAG_START_APP)\n # If we have tried too many times but the app is still not started, stop DroidBot\n if number_of_starts > MAX_NUM_RESTARTS:\n raise InputInterruptedException(\"The app cannot be started.\")\n\n # if app is not started, try start it\n if self.last_event_flag.endswith(EVENT_FLAG_START_APP):\n # It seems the app stuck at some state, and cannot be started\n # just pass to let viewclient deal with this case\n self.logger.info(\"The app had been restarted %d times.\", number_of_starts)\n self.logger.info(\"Trying to restart app...\")\n pass\n else:\n start_app_intent = self.app.get_start_intent()\n\n self.last_event_flag += EVENT_FLAG_START_APP\n self.last_event_str = EVENT_FLAG_START_APP\n return IntentEvent(start_app_intent)\n\n # select a view to click\n view_to_touch = self.select_a_view(self.current_state)\n\n # if no view can be selected, restart the app\n if view_to_touch is None:\n stop_app_intent = self.app.get_stop_intent()\n self.last_event_flag += EVENT_FLAG_STOP_APP\n self.last_event_str = EVENT_FLAG_STOP_APP\n return IntentEvent(stop_app_intent)\n\n view_to_touch_str = view_to_touch['view_str']\n if view_to_touch_str.startswith('BACK'):\n result = KeyEvent('BACK')\n else:\n result = TouchEvent(view=view_to_touch)\n\n self.last_event_flag += EVENT_FLAG_TOUCH\n self.last_event_str = view_to_touch_str\n self.save_explored_view(self.current_state, self.last_event_str)\n return result\n\n def select_a_view(self, state):\n \"\"\"\n select a view in the view list of given state, let droidbot touch it\n @param state: DeviceState\n @return:\n \"\"\"\n views = []\n for view in state.views:\n if view['enabled'] and len(view['children']) == 0:\n views.append(view)\n\n if self.random_input:\n random.shuffle(views)\n\n # add a \"BACK\" view, consider go back first/last according to search policy\n mock_view_back = {'view_str': 'BACK_%s' % state.foreground_activity,\n 'text': 'BACK_%s' % state.foreground_activity}\n if self.search_method == POLICY_NAIVE_DFS:\n views.append(mock_view_back)\n elif self.search_method == POLICY_NAIVE_BFS:\n views.insert(0, mock_view_back)\n\n # first try to find a preferable view\n for view in views:\n view_text = view['text'] if view['text'] is not None else ''\n view_text = view_text.lower().strip()\n if view_text in self.preferred_buttons \\\n and (state.foreground_activity, view['view_str']) not in self.explored_views:\n self.logger.info(\"selected an preferred view: %s\" % view['view_str'])\n return view\n\n # try to find a un-clicked view\n for view in views:\n if 
(state.foreground_activity, view['view_str']) not in self.explored_views:\n self.logger.info(\"selected an un-clicked view: %s\" % view['view_str'])\n return view\n\n # if all enabled views have been clicked, try jump to another activity by clicking one of state transitions\n if self.random_input:\n random.shuffle(views)\n transition_views = {transition[0] for transition in self.state_transitions}\n for view in views:\n if view['view_str'] in transition_views:\n self.logger.info(\"selected a transition view: %s\" % view['view_str'])\n return view\n\n # no window transition found, just return a random view\n # view = views[0]\n # self.logger.info(\"selected a random view: %s\" % view['view_str'])\n # return view\n\n # DroidBot stuck on current state, return None\n self.logger.info(\"no view could be selected in state: %s\" % state.tag)\n return None\n\n def save_state_transition(self, event_str, old_state, new_state):\n \"\"\"\n save the state transition\n @param event_str: str, representing the event cause the transition\n @param old_state: DeviceState\n @param new_state: DeviceState\n @return:\n \"\"\"\n if event_str is None or old_state is None or new_state is None:\n return\n if new_state.is_different_from(old_state):\n self.state_transitions.add((event_str, old_state.tag, new_state.tag))\n\n def save_explored_view(self, state, view_str):\n \"\"\"\n save the explored view\n @param state: DeviceState, where the view located\n @param view_str: str, representing a view\n @return:\n \"\"\"\n if not state:\n return\n state_activity = state.foreground_activity\n self.explored_views.add((state_activity, view_str))" }, { "identifier": "UtgGreedySearchPolicy", "path": "droidbot/input_policy.py", "snippet": "class UtgGreedySearchPolicy(UtgBasedInputPolicy):\n \"\"\"\n DFS/BFS (according to search_method) strategy to explore UFG (new)\n \"\"\"\n\n def __init__(self, device, app, random_input, search_method):\n super(UtgGreedySearchPolicy, self).__init__(device, app, random_input)\n self.logger = logging.getLogger(self.__class__.__name__)\n self.search_method = search_method\n\n self.preferred_buttons = [\"yes\", \"ok\", \"activate\", \"detail\", \"more\", \"access\",\n \"allow\", \"check\", \"agree\", \"try\", \"go\", \"next\"]\n\n self.__nav_target = None\n self.__nav_num_steps = -1\n self.__num_restarts = 0\n self.__num_steps_outside = 0\n self.__event_trace = \"\"\n self.__missed_states = set()\n self.__random_explore = False\n\n def generate_event_based_on_utg(self, input_manager):\n \"\"\"\n generate an event based on current UTG\n @return: InputEvent\n \"\"\"\n current_state = self.current_state\n self.logger.info(\"Current state: %s\" % current_state.state_str)\n if current_state.state_str in self.__missed_states:\n self.__missed_states.remove(current_state.state_str)\n\n if current_state.get_app_activity_depth(self.app) < 0:\n # If the app is not in the activity stack\n start_app_intent = self.app.get_start_intent()\n\n # It seems the app stucks at some state, has been\n # 1) force stopped (START, STOP)\n # just start the app again by increasing self.__num_restarts\n # 2) started at least once and cannot be started (START)\n # pass to let viewclient deal with this case\n # 3) nothing\n # a normal start. 
clear self.__num_restarts.\n\n if self.__event_trace.endswith(EVENT_FLAG_START_APP + EVENT_FLAG_STOP_APP) \\\n or self.__event_trace.endswith(EVENT_FLAG_START_APP):\n self.__num_restarts += 1\n self.logger.info(\"The app had been restarted %d times.\", self.__num_restarts)\n else:\n self.__num_restarts = 0\n\n # pass (START) through\n if not self.__event_trace.endswith(EVENT_FLAG_START_APP):\n if self.__num_restarts > MAX_NUM_RESTARTS:\n # If the app had been restarted too many times, enter random mode\n msg = \"The app had been restarted too many times. Entering random mode.\"\n self.logger.info(msg)\n self.__random_explore = True\n else:\n # Start the app\n self.__event_trace += EVENT_FLAG_START_APP\n self.logger.info(\"Trying to start the app...\")\n return IntentEvent(intent=start_app_intent)\n\n elif current_state.get_app_activity_depth(self.app) > 0:\n # If the app is in activity stack but is not in foreground\n self.__num_steps_outside += 1\n\n if self.__num_steps_outside > MAX_NUM_STEPS_OUTSIDE:\n # If the app has not been in foreground for too long, try to go back\n if self.__num_steps_outside > MAX_NUM_STEPS_OUTSIDE_KILL:\n stop_app_intent = self.app.get_stop_intent()\n go_back_event = IntentEvent(stop_app_intent)\n else:\n go_back_event = KeyEvent(name=\"BACK\")\n self.__event_trace += EVENT_FLAG_NAVIGATE\n self.logger.info(\"Going back to the app...\")\n return go_back_event\n else:\n # If the app is in foreground\n self.__num_steps_outside = 0\n\n # Get all possible input events\n possible_events = current_state.get_possible_input()\n\n if self.random_input:\n random.shuffle(possible_events)\n\n if self.search_method == POLICY_GREEDY_DFS:\n possible_events.append(KeyEvent(name=\"BACK\"))\n elif self.search_method == POLICY_GREEDY_BFS:\n possible_events.insert(0, KeyEvent(name=\"BACK\"))\n\n # get humanoid result, use the result to sort possible events\n # including back events\n if self.device.humanoid is not None:\n possible_events = self.__sort_inputs_by_humanoid(possible_events)\n\n # If there is an unexplored event, try the event first\n for input_event in possible_events:\n if not self.utg.is_event_explored(event=input_event, state=current_state):\n self.logger.info(\"Trying an unexplored event.\")\n self.__event_trace += EVENT_FLAG_EXPLORE\n return input_event\n\n target_state = self.__get_nav_target(current_state)\n if target_state:\n navigation_steps = self.utg.get_navigation_steps(from_state=current_state, to_state=target_state)\n if navigation_steps and len(navigation_steps) > 0:\n self.logger.info(\"Navigating to %s, %d steps left.\" % (target_state.state_str, len(navigation_steps)))\n self.__event_trace += EVENT_FLAG_NAVIGATE\n return navigation_steps[0][1]\n\n if self.__random_explore:\n self.logger.info(\"Trying random event.\")\n random.shuffle(possible_events)\n return possible_events[0]\n\n # If couldn't find a exploration target, stop the app\n stop_app_intent = self.app.get_stop_intent()\n self.logger.info(\"Cannot find an exploration target. 
Trying to restart app...\")\n self.__event_trace += EVENT_FLAG_STOP_APP\n return IntentEvent(intent=stop_app_intent)\n\n def __sort_inputs_by_humanoid(self, possible_events):\n if sys.version.startswith(\"3\"):\n from xmlrpc.client import ServerProxy\n else:\n from xmlrpclib import ServerProxy\n proxy = ServerProxy(\"http://%s/\" % self.device.humanoid)\n request_json = {\n \"history_view_trees\": self.humanoid_view_trees,\n \"history_events\": [x.__dict__ for x in self.humanoid_events],\n \"possible_events\": [x.__dict__ for x in possible_events],\n \"screen_res\": [self.device.display_info[\"width\"],\n self.device.display_info[\"height\"]]\n }\n result = json.loads(proxy.predict(json.dumps(request_json)))\n new_idx = result[\"indices\"]\n text = result[\"text\"]\n new_events = []\n\n # get rid of infinite recursive by randomizing first event\n if not self.utg.is_state_reached(self.current_state):\n new_first = random.randint(0, len(new_idx) - 1)\n new_idx[0], new_idx[new_first] = new_idx[new_first], new_idx[0]\n\n for idx in new_idx:\n if isinstance(possible_events[idx], SetTextEvent):\n possible_events[idx].text = text\n new_events.append(possible_events[idx])\n return new_events\n\n def __get_nav_target(self, current_state):\n # If last event is a navigation event\n if self.__nav_target and self.__event_trace.endswith(EVENT_FLAG_NAVIGATE):\n navigation_steps = self.utg.get_navigation_steps(from_state=current_state, to_state=self.__nav_target)\n if navigation_steps and 0 < len(navigation_steps) <= self.__nav_num_steps:\n # If last navigation was successful, use current nav target\n self.__nav_num_steps = len(navigation_steps)\n return self.__nav_target\n else:\n # If last navigation was failed, add nav target to missing states\n self.__missed_states.add(self.__nav_target.state_str)\n\n reachable_states = self.utg.get_reachable_states(current_state)\n if self.random_input:\n random.shuffle(reachable_states)\n\n for state in reachable_states:\n # Only consider foreground states\n if state.get_app_activity_depth(self.app) != 0:\n continue\n # Do not consider missed states\n if state.state_str in self.__missed_states:\n continue\n # Do not consider explored states\n if self.utg.is_state_explored(state):\n continue\n self.__nav_target = state\n navigation_steps = self.utg.get_navigation_steps(from_state=current_state, to_state=self.__nav_target)\n if len(navigation_steps) > 0:\n self.__nav_num_steps = len(navigation_steps)\n return state\n\n self.__nav_target = None\n self.__nav_num_steps = -1\n return None" }, { "identifier": "UtgReplayPolicy", "path": "droidbot/input_policy.py", "snippet": "class UtgReplayPolicy(InputPolicy):\n \"\"\"\n Replay DroidBot output generated by UTG policy\n \"\"\"\n\n def __init__(self, device, app, replay_output):\n super(UtgReplayPolicy, self).__init__(device, app)\n self.logger = logging.getLogger(self.__class__.__name__)\n self.replay_output = replay_output\n\n import os\n event_dir = os.path.join(replay_output, \"events\")\n self.event_paths = sorted([os.path.join(event_dir, x) for x in\n next(os.walk(event_dir))[2]\n if x.endswith(\".json\")])\n # skip HOME and start app intent\n self.device = device\n self.app = app\n self.event_idx = 2\n self.num_replay_tries = 0\n self.utg = UTG(device=device, app=app, random_input=None)\n self.last_event = None\n self.last_state = None\n self.current_state = None\n\n def generate_event(self):\n \"\"\"\n generate an event based on replay_output\n @return: InputEvent\n \"\"\"\n import time\n while self.event_idx < 
len(self.event_paths) and \\\n self.num_replay_tries < MAX_REPLY_TRIES:\n self.num_replay_tries += 1\n current_state = self.device.get_current_state()\n if current_state is None:\n time.sleep(5)\n self.num_replay_tries = 0\n return KeyEvent(name=\"BACK\")\n\n curr_event_idx = self.event_idx\n self.__update_utg()\n while curr_event_idx < len(self.event_paths):\n event_path = self.event_paths[curr_event_idx]\n with open(event_path, \"r\") as f:\n curr_event_idx += 1\n\n try:\n event_dict = json.load(f)\n except Exception as e:\n self.logger.info(\"Loading %s failed\" % event_path)\n continue\n\n if event_dict[\"start_state\"] != current_state.state_str:\n continue\n if not self.device.is_foreground(self.app):\n # if current app is in background, bring it to foreground\n component = self.app.get_package_name()\n if self.app.get_main_activity():\n component += \"/%s\" % self.app.get_main_activity()\n return IntentEvent(Intent(suffix=component))\n \n self.logger.info(\"Replaying %s\" % event_path)\n self.event_idx = curr_event_idx\n self.num_replay_tries = 0\n # return InputEvent.from_dict(event_dict[\"event\"])\n event = InputEvent.from_dict(event_dict[\"event\"])\n self.last_state = self.current_state\n self.last_event = event\n return event \n\n time.sleep(5)\n\n # raise InputInterruptedException(\"No more record can be replayed.\")\n def __update_utg(self):\n self.utg.add_transition(self.last_event, self.last_state, self.current_state)" }, { "identifier": "ManualPolicy", "path": "droidbot/input_policy.py", "snippet": "class ManualPolicy(UtgBasedInputPolicy):\n \"\"\"\n manually explore UFG\n \"\"\"\n\n def __init__(self, device, app):\n super(ManualPolicy, self).__init__(device, app, False)\n self.logger = logging.getLogger(self.__class__.__name__)\n\n self.__first_event = True\n\n def generate_event_based_on_utg(self):\n \"\"\"\n generate an event based on current UTG\n @return: InputEvent\n \"\"\"\n if self.__first_event:\n self.__first_event = False\n self.logger.info(\"Trying to start the app...\")\n start_app_intent = self.app.get_start_intent()\n return IntentEvent(intent=start_app_intent)\n else:\n return ManualEvent()" }, { "identifier": "TaskPolicy", "path": "droidbot/input_policy.py", "snippet": "class TaskPolicy(UtgBasedInputPolicy):\n\n def __init__(self, device, app, random_input, task, use_memory=True, debug_mode=False):\n super(TaskPolicy, self).__init__(device, app, random_input)\n self.logger = logging.getLogger(self.__class__.__name__)\n self.task = task\n\n self.__nav_target = None\n self.__nav_num_steps = -1\n self.__num_restarts = 0\n self.__num_steps_outside = 0\n self.__event_trace = \"\"\n self.__missed_states = set()\n self.__random_explore = random_input\n self.__action_history = []\n self.__thought_history = []\n self.use_memory = use_memory\n # if use_memory:\n # self.memory = Memory(app_name=self.app.app_name, app_output_path=self.device.output_dir)\n if self.use_memory:\n self.similar_ele_path, self.similar_ele_function, self.similar_ele_statement = self.get_most_similar_element()\n if not self.similar_ele_function:\n self.use_memory = False\n print('=============\\nWarning: Did not find the memory of this app, the app memory is disabled\\n=============')\n else:\n print(f'============\\nFound element: {self.similar_ele_statement}\\nPath: {self.similar_ele_path}\\nFunction: {self.similar_ele_function}\\n============')\n self.state_ele_memory = {} # memorize some important states that contain elements of insight\n\n def get_most_similar_element(self):\n from 
InstructorEmbedding import INSTRUCTOR\n from sklearn.metrics.pairwise import cosine_similarity\n import numpy as np\n model = INSTRUCTOR('hkunlp/instructor-xl')\n task_embedding = model.encode('task: ' + self.task).reshape(1, -1)\n\n with open('memory/node_filtered_elements.json') as file:\n ele_statements = json.load(file)\n with open('memory/element_description.json') as file:\n ele_functions = json.load(file)\n with open('memory/embedded_elements_desc.json') as file:\n embeddings = json.load(file)\n app_name = self.device.output_dir.split('/')[-1]\n if app_name not in embeddings.keys():\n return None, None, None\n app_embeddings = embeddings[app_name]\n\n # similarities = {}\n max_similarity, similar_ele_idx = -9999, -9999\n for state_str, elements in app_embeddings.items():\n # if the target element is in the first ui, no onclick is needed\n # if ele_statements[app_name][state_str]['path'] == []:\n # continue\n # similarities[state_str] = []\n for idx, ele in enumerate(elements):\n if ele:\n npele = np.array(ele).reshape(1, -1)\n similarity = cosine_similarity(task_embedding, npele)[0][0]\n else:\n similarity = -9999\n # similarities[state_str].append(similarity)\n if similarity > max_similarity:\n max_similarity = similarity\n similar_ele_idx = idx\n similar_state_str = state_str\n\n similar_ele = ele_statements[app_name][similar_state_str]['elements'][similar_ele_idx]\n similar_ele_path = ele_statements[app_name][similar_state_str]['path']\n similar_ele_desc = ele_functions[app_name][similar_state_str][similar_ele_idx]\n del model\n return similar_ele_path, similar_ele_desc, similar_ele\n \n def _scroll_to_top(self, scroller, all_views_for_mark, old_state=None):\n prefix_scroll_event = []\n if old_state is None:\n old_state = self.current_state \n for _ in range(MAX_SCROLL_NUM): # first scroll up to the top\n self.device.send_event(ScrollEvent(view=scroller, direction=\"UP\"))\n scrolled_state = self.device.get_current_state()\n self.utg.add_transition(ScrollEvent(view=scroller, direction=\"UP\"), old_state, scrolled_state)\n old_state = scrolled_state\n state_prompt, scrolled_candidate_actions, scrolled_views, _ = scrolled_state.get_described_actions()\n scrolled_new_views = [] # judge whether there is a new view after scrolling\n for scrolled_view in scrolled_views:\n if scrolled_view not in all_views_for_mark:\n scrolled_new_views.append(scrolled_view)\n all_views_for_mark.append(scrolled_view)\n if len(scrolled_new_views) == 0:\n break\n\n prefix_scroll_event.append(ScrollEvent(view=scroller, direction=\"UP\"))\n return prefix_scroll_event\n\n\n def generate_event_based_on_utg(self, input_manager):\n \"\"\"\n generate an event based on current UTG\n @return: InputEvent\n \"\"\"\n current_state = self.current_state\n self.logger.info(\"Current state: %s\" % current_state.state_str)\n if current_state.state_str in self.__missed_states:\n self.__missed_states.remove(current_state.state_str)\n\n if current_state.get_app_activity_depth(self.app) < 0:\n # If the app is not in the activity stack\n start_app_intent = self.app.get_start_intent()\n\n # It seems the app stucks at some state, has been\n # 1) force stopped (START, STOP)\n # just start the app again by increasing self.__num_restarts\n # 2) started at least once and cannot be started (START)\n # pass to let viewclient deal with this case\n # 3) nothing\n # a normal start. 
clear self.__num_restarts.\n\n if self.__event_trace.endswith(EVENT_FLAG_START_APP + EVENT_FLAG_STOP_APP) \\\n or self.__event_trace.endswith(EVENT_FLAG_START_APP):\n self.__num_restarts += 1\n self.logger.info(\"The app had been restarted %d times.\", self.__num_restarts)\n else:\n self.__num_restarts = 0\n\n # pass (START) through\n if not self.__event_trace.endswith(EVENT_FLAG_START_APP):\n if self.__num_restarts > MAX_NUM_RESTARTS:\n # If the app had been restarted too many times, enter random mode\n msg = \"The app had been restarted too many times. Entering random mode.\"\n self.logger.info(msg)\n self.__random_explore = True\n else:\n # Start the app\n self.__event_trace += EVENT_FLAG_START_APP\n self.logger.info(\"Trying to start the app...\")\n # self.__action_history = [f'- start the app {self.app.app_name}']\n self.__action_history = [f'- launchApp {self.app.app_name}']\n self.__thought_history = [f'launch the app {self.app.app_name} to finish the task {self.task}']\n return None, IntentEvent(intent=start_app_intent)\n\n elif current_state.get_app_activity_depth(self.app) > 0:\n # If the app is in activity stack but is not in foreground\n self.__num_steps_outside += 1\n\n if self.__num_steps_outside > MAX_NUM_STEPS_OUTSIDE:\n # If the app has not been in foreground for too long, try to go back\n if self.__num_steps_outside > MAX_NUM_STEPS_OUTSIDE_KILL:\n stop_app_intent = self.app.get_stop_intent()\n go_back_event = IntentEvent(stop_app_intent)\n else:\n go_back_event = KeyEvent(name=\"BACK\")\n self.__event_trace += EVENT_FLAG_NAVIGATE\n self.logger.info(\"Going back to the app...\")\n self.__action_history.append('- go back')\n self.__thought_history.append('the app has not been in foreground for too long, try to go back')\n return None, go_back_event\n else:\n # If the app is in foreground\n self.__num_steps_outside = 0\n \n \n scrollable_views = current_state.get_scrollable_views()#self._get_scrollable_views(current_state)\n \n if len(scrollable_views) > 0:\n '''\n if there is at least one scroller in the screen, we scroll each scroller many times until all the screens after scrolling have been recorded, you do not need to read\n '''\n # print(scrollable_views)\n\n actions_dict = {}\n whole_state_views, whole_state_actions, whole_state_strs = [], [], []\n\n # state_strs = [current_state.state_str]\n state_prompt, current_candidate_actions, current_views, _ = current_state.get_described_actions()\n all_views_for_mark = copy.deepcopy(current_views) # just for judging whether the screen has been scrolled up to the top\n\n for scrollerid in range(len(scrollable_views)):\n scroller = scrollable_views[scrollerid]\n # prefix_scroll_event = []\n actions_dict[scrollerid] = []\n\n prefix_scroll_event = self._scroll_to_top(scroller, all_views_for_mark)\n \n # after scrolling to the top, update the current_state\n top_state = self.device.get_current_state()\n state_prompt, top_candidate_actions, top_views, _ = top_state.get_described_actions()\n all_views_without_id, all_actions = top_views, top_candidate_actions\n\n too_few_item_time = 0\n\n for _ in range(MAX_SCROLL_NUM): # then scroll down to the bottom\n whole_state_strs.append(top_state.state_str) # record the states from the top to the bottom\n self.device.send_event(ScrollEvent(view=scroller, direction=\"DOWN\"))\n scrolled_state = self.device.get_current_state()\n state_prompt, scrolled_candidate_actions, scrolled_views, _ = scrolled_state.get_described_actions()\n \n scrolled_new_views = []\n for scrolled_view_id in 
range(len(scrolled_views)):\n scrolled_view = scrolled_views[scrolled_view_id]\n if scrolled_view not in all_views_without_id:\n scrolled_new_views.append(scrolled_view)\n all_views_without_id.append(scrolled_view)\n all_actions.append(prefix_scroll_event + [ScrollEvent(view=scroller, direction=\"DOWN\"), scrolled_candidate_actions[scrolled_view_id]])\n # print('found new views:', scrolled_new_views)\n if len(scrolled_new_views) == 0:\n break\n \n prefix_scroll_event.append(ScrollEvent(view=scroller, direction=\"DOWN\"))\n\n if len(scrolled_new_views) < 2:\n too_few_item_time += 1\n if too_few_item_time >= 2:\n break\n\n self.utg.add_transition(ScrollEvent(view=scroller, direction=\"DOWN\"), top_state, scrolled_state)\n top_state = scrolled_state\n \n # filter out the views that have been added to the whole_state by scrolling other scrollers\n for all_view_id in range(len(all_views_without_id)):\n view = all_views_without_id[all_view_id]\n if view not in whole_state_views:\n whole_state_views.append(view)\n whole_state_actions.append(all_actions[all_view_id])\n \n all_views_for_mark = []\n _ = self._scroll_to_top(scroller, all_views_for_mark, top_state)\n # print(whole_state_views)\n action, candidate_actions, target_view, thought = self._get_action_from_views_actions(\n views=whole_state_views, candidate_actions=whole_state_actions, state_strs=whole_state_strs, action_history=self.__action_history, thought_history=self.__thought_history)\n\n if isinstance(action, list): # the screen has to be scrolled first\n last_state = None\n for eventid in range(len(action) - 1):\n self.device.send_event(action[eventid])\n last_state = self.device.get_current_state()\n # self.__action_history.append(current_state.get_action_desc(action[eventid]))\n self.__action_history.append(current_state.get_action_descv2(action[-1], target_view))\n self.__thought_history.append(thought)\n return last_state, action[-1]\n '''\n end for dealing with scrollers\n '''\n else:\n action, candidate_actions, target_view, thought = self._get_action_from_views_actions(\n current_state=current_state, action_history=self.__action_history, thought_history=self.__thought_history, state_strs=current_state.state_str)\n \n if action == FINISHED:\n return None, FINISHED\n if action is not None:\n self.__action_history.append(current_state.get_action_descv2(action, target_view))\n self.__thought_history.append(thought)\n return None, action\n\n if self.__random_explore:\n self.logger.info(\"Trying random event.\")\n action = random.choice(candidate_actions)\n self.__action_history.append(current_state.get_action_descv2(action, target_view))\n self.__thought_history.append('random trying')\n return None, action\n\n # If couldn't find a exploration target, stop the app\n stop_app_intent = self.app.get_stop_intent()\n self.logger.info(\"Cannot find an exploration target. 
Trying to restart app...\")\n self.__action_history.append('- stop the app')\n self.__thought_history.append(\"couldn't find a exploration target, stop the app\")\n self.__event_trace += EVENT_FLAG_STOP_APP\n return None, IntentEvent(intent=stop_app_intent)\n \n def _save2yaml(self, file_name, state_prompt, idx, state_str, inputs='null'):\n if not os.path.exists(file_name):\n tmp_data = {\n 'task_name': self.task,\n 'step_num': 0,\n 'records': []\n }\n with open(file_name, 'w', encoding='utf-8') as f:\n yaml.dump(tmp_data, f)\n\n with open(file_name, 'r', encoding='utf-8') as f:\n old_yaml_data = yaml.safe_load(f)\n \n new_records = old_yaml_data['records']\n new_records.append(\n {'State': state_prompt,\n 'Choice': idx,\n 'Input': inputs,\n 'state_str': state_str}\n )\n # import pdb;pdb.set_trace()\n data = {\n 'task_name': self.task,\n 'step_num': len(list(old_yaml_data['records'])),\n 'records': new_records\n }\n with open(file_name, 'w', encoding='utf-8') as f:\n yaml.dump(data, f)\n def _make_prompt_lmql(self, state_prompt, action_history, is_text, state_str, view_text=None, thought_history=None, use_thoughts=False):\n if self.use_memory:\n # if isinstance(state_str, list):\n # if len(state_str) == 1:\n # state_str = state_str[0]\n # else:\n # state_str = self.memory.hash_state(state_prompt)\n # new_state_prompt = self.f(action_history, state_prompt, state_str)\n # if new_state_prompt !z= None and new_state_prompt != 'no_description':\n # state_prompt = new_state_prompt\n if len(action_history) <= len(self.similar_ele_path):\n current_ui_id = len(action_history) - 1\n new_state_prompt = tools.insert_onclick_into_prompt(state_prompt, self.similar_ele_path[current_ui_id], self.similar_ele_function)\n if new_state_prompt != state_prompt: # current state contains an element of insight\n self.state_ele_memory[state_str] = new_state_prompt\n state_prompt = new_state_prompt\n # elif state_str in self.state_ele_memory.keys():\n # state_prompt = self.state_ele_memory[state_str]\n\n if use_thoughts:\n history_with_thought = []\n for idx in range(len(action_history)):\n history_with_thought.append(action_history[idx] + ' Reason: ' + thought_history[idx])\n else:\n history_with_thought = action_history\n\n\n return '\\n'.join(history_with_thought),state_prompt\n def _make_prompt(self, state_prompt, action_history, is_text, state_str, view_text=None, thought_history=None, use_thoughts=False):\n if self.use_memory:\n # if isinstance(state_str, list):\n # if len(state_str) == 1:\n # state_str = state_str[0]\n # else:\n # state_str = self.memory.hash_state(state_prompt)\n # new_state_prompt = self.f(action_history, state_prompt, state_str)\n # if new_state_prompt !z= None and new_state_prompt != 'no_description':\n # state_prompt = new_state_prompt\n if len(action_history) <= len(self.similar_ele_path):\n current_ui_id = len(action_history) - 1\n new_state_prompt = tools.insert_onclick_into_prompt(state_prompt, self.similar_ele_path[current_ui_id], self.similar_ele_function)\n if new_state_prompt != state_prompt: # current state contains an element of insight\n self.state_ele_memory[state_str] = new_state_prompt\n state_prompt = new_state_prompt\n # elif state_str in self.state_ele_memory.keys():\n # state_prompt = self.state_ele_memory[state_str]\n\n if use_thoughts:\n history_with_thought = []\n for idx in range(len(action_history)):\n history_with_thought.append(action_history[idx] + ' Reason: ' + thought_history[idx])\n else:\n history_with_thought = action_history\n\n introduction = '''You are a 
smartphone assistant to help users complete tasks by interacting with mobile apps.Given a task, the previous UI actions, and the content of current UI state, your job is to decide whether the task is already finished by the previous actions, and if not, decide which UI element in current UI state should be interacted.'''\n task_prompt = 'Task: ' + self.task\n history_prompt = 'Previous UI actions: \\n' + '\\n'.join(history_with_thought)\n full_state_prompt = 'Current UI state: \\n' + state_prompt\n request_prompt = '''Your answer should always use the following format:1. Completing this task on a smartphone usually involves these steps: <?>.\\n2. Analyses of the relations between the task and the previous UI actions and current UI state: <?>.\\n3. Based on the previous actions, is the task already finished? <Y/N>. The next step should be <?/None>.\\n4. Can the task be proceeded with the current UI state? <Y/N>. Fill in the blanks about the next one interaction: - id=<id number> - action=<tap/input> - input text=<text or N/A>'''\n prompt = introduction + '\\n' + task_prompt + '\\n' + history_prompt + '\\n' + full_state_prompt + '\\n' + request_prompt\n return prompt\n \n def _extract_input_text(self, string, start='Text: ', end=' Thought'):\n start_index = string.find(start) + len(start) # Find the location of 'start'\n if start_index == -1:\n start_index = 0\n end_index = string.find(end) # Find the location of 'end'\n substring = string[start_index:end_index] if end_index != -1 else string[start_index:]\n return substring\n \n def _extract_input_textv2(self, string):\n if string[:11] == 'InputText: ':\n return string[11:]\n else:\n return string\n \n def _get_text_view_description(self, view):\n content_description = safe_dict_get(view, 'content_description', default='')\n view_text = safe_dict_get(view, 'text', default='')\n\n view_desc = f\"<input class='&'>#</input>\"#.replace('&', view_class)#.replace('#', text)\n if view_text:\n view_desc = view_desc.replace('#', view_text)\n else:\n view_desc = view_desc.replace('#', '')\n if content_description:\n view_desc = view_desc.replace('&', content_description)\n else:\n view_desc = view_desc.replace(\" class='&'\", \"\")\n return view_desc\n\n def _get_action_from_views_actions(self, action_history, thought_history, views=None, candidate_actions=None, state_strs=None, current_state=None):\n '''\n get action choice from LLM based on a list of views and corresponding actions\n '''\n if current_state:\n state_prompt, candidate_actions, _, _ = current_state.get_described_actions()\n state_str = current_state.state_str\n if USE_LMQL:\n history, state_prompt = self._make_prompt_lmql(state_prompt, action_history, is_text=False, state_str=state_str,\n thought_history=thought_history) \n else:\n prompt = self._make_prompt(state_prompt, action_history, is_text=False, state_str=state_str, thought_history=thought_history)\n else:\n views_with_id = []\n for id in range(len(views)):\n views_with_id.append(tools.insert_id_into_view(views[id], id))\n state_prompt = '\\n'.join(views_with_id)\n state_str = tools.hash_string(state_prompt)\n if USE_LMQL:\n history, state_prompt = self._make_prompt_lmql(state_prompt, action_history, is_text=False, state_str=state_str,\n thought_history=thought_history) \n else:\n prompt = self._make_prompt(state_prompt, action_history, is_text=False, state_str=state_str, thought_history=thought_history)\n\n # ids = [str(idx) for idx, i in enumerate(candidate_actions)]\n ids = str([i for i in range(len(candidate_actions))])\n 
\n if USE_LMQL:\n idx, action_type, input_text=prompt_llm_with_history(task=self.task, history=history, ui_desc=state_prompt, ids=ids)\n else:\n print('********************************** prompt: **********************************')\n print(prompt)\n print('********************************** end of prompt **********************************')\n response = tools.query_gpt(prompt)\n \n print(f'response: {response}')\n idx, action_type, input_text = tools.extract_action(response)\n\n file_name = self.device.output_dir +'/'+ self.task.replace('\"', '_').replace(\"'\", '_') + '.yaml' #str(str(time.time()).replace('.', ''))\n idx = int(idx)\n selected_action = candidate_actions[idx]\n \n selected_view_description = tools.get_item_properties_from_id(ui_state_desc=state_prompt, view_id=idx)\n thought = ''# tools.get_thought(response)\n\n if isinstance(selected_action, SetTextEvent):\n if input_text != \"N/A\" and input_text != None:\n selected_action.text = input_text.replace('\"', '').replace(' ', '-')\n if len(selected_action.text) > 30: # heuristically disable long text input\n selected_action.text = ''\n else:\n selected_action.text = ''\n self._save2yaml(file_name, state_prompt, idx, state_strs, inputs=selected_action.text)\n else:\n self._save2yaml(file_name, state_prompt, idx, state_strs, inputs='null')\n return selected_action, candidate_actions, selected_view_description, thought\n\n def _insert_predictions_into_state_prompt(self, state_prompt, current_state_item_descriptions):\n state_prompt_list = state_prompt.split('>\\n')\n item_list = []\n for view_desc in state_prompt_list:\n if view_desc[0] == ' ':\n view_desc = view_desc[1:]\n if view_desc[-1] != '>':\n view_desc = view_desc + '>'\n view_desc_without_id = tools.get_view_without_id(view_desc)\n if view_desc_without_id in current_state_item_descriptions.keys():\n prediction = 'title=' + current_state_item_descriptions[view_desc_without_id]\n view_desc_list = view_desc.split(' ', 2)\n if len(view_desc_list) > 2: # for example, <button id=3 class='More options' checked=False></button>\n inserted_view = view_desc_list[0] + ' ' + view_desc_list[1] + ' ' + prediction + ' ' + view_desc_list[2]\n else: # for example, <p id=4>June</p>\n latter_part = view_desc_list[1].split('>', 1)\n inserted_view = view_desc_list[0] + ' ' + latter_part[0] + ' ' + prediction + '>' + latter_part[1]\n if inserted_view[-1] != '>':\n inserted_view += '>'\n item_list.append(inserted_view)\n else:\n item_list.append(view_desc)\n return '\\n'.join(item_list)\n\n def _get_item_prediction(self, action_history, state_prompt, state_str):\n '''\n find the most match history_state in memory_graph based on action_history. 
\n match the current items in device_state with the history items in history_state, \n return the predicted screen after touching the item\n if can not find the device_state not in action_history, return None, can decide whether to explore\n '''\n def parse_history_views(history):\n parsed_views = []\n for history_action in history:\n history_action_list = history_action.split(': ', 1)\n if 'launchApp' in history_action:\n return []\n latter_part = history_action_list[1]\n if ' InputText:' in latter_part:\n target_view = latter_part.split(' InputText:', 1)[0]\n elif ' Reason:' in latter_part:\n target_view = latter_part.split(' Reason:', 1)[0]\n else:\n target_view = latter_part\n parsed_views.append(target_view)\n return parsed_views\n \n action_history = parse_history_views(action_history[1:]) # ignore the first action, which is launching the app\n \n # search the current state str in memory based on history actions\n current_state_str = self.memory.get_first_state_str()\n next_state_str = None\n for actionid in range(0, len(action_history)):\n actioned_view = action_history[actionid] #action_history[actionid].rsplit('.', 1)[0]\n next_state_str = self.memory.get_successor_by_node_edge(current_state_str, actioned_view)\n current_state_str = next_state_str\n # the past actions have lead to a state that does not exist in the memory\n if next_state_str == None:\n break\n if next_state_str == None:\n current_state_str = state_str\n # now, current_state_str is the current device state string, we should add all its successors' information into the items on this device state\n current_state_item_descriptions = self.memory.get_predictions_of_items(current_state_str)\n # import pdb;pdb.set_trace()\n if current_state_item_descriptions is None:\n return 'no_description' # there is no description of the current state, either it is the leaf node or it was not explored\n # import pdb;pdb.set_trace()\n return self._insert_predictions_into_state_prompt(state_prompt, current_state_item_descriptions)" }, { "identifier": "POLICY_NAIVE_DFS", "path": "droidbot/input_policy.py", "snippet": "POLICY_NAIVE_DFS = \"dfs_naive\"" }, { "identifier": "POLICY_GREEDY_DFS", "path": "droidbot/input_policy.py", "snippet": "POLICY_GREEDY_DFS = \"dfs_greedy\"" }, { "identifier": "POLICY_NAIVE_BFS", "path": "droidbot/input_policy.py", "snippet": "POLICY_NAIVE_BFS = \"bfs_naive\"" }, { "identifier": "POLICY_GREEDY_BFS", "path": "droidbot/input_policy.py", "snippet": "POLICY_GREEDY_BFS = \"bfs_greedy\"" }, { "identifier": "POLICY_REPLAY", "path": "droidbot/input_policy.py", "snippet": "POLICY_REPLAY = \"replay\"" }, { "identifier": "POLICY_MEMORY_GUIDED", "path": "droidbot/input_policy.py", "snippet": "POLICY_MEMORY_GUIDED = \"memory_guided\" # implemented in input_policy2" }, { "identifier": "POLICY_MANUAL", "path": "droidbot/input_policy.py", "snippet": "POLICY_MANUAL = \"manual\"" }, { "identifier": "POLICY_MONKEY", "path": "droidbot/input_policy.py", "snippet": "POLICY_MONKEY = \"monkey\"" }, { "identifier": "POLICY_NONE", "path": "droidbot/input_policy.py", "snippet": "POLICY_NONE = \"none\"" }, { "identifier": "POLICY_TASK", "path": "droidbot/input_policy.py", "snippet": "POLICY_TASK = \"task\"" } ]
import json
import logging
import subprocess
import time
from .input_event import EventLog
from .input_policy import UtgBasedInputPolicy, UtgNaiveSearchPolicy, UtgGreedySearchPolicy, \
    UtgReplayPolicy, \
    ManualPolicy, TaskPolicy, \
    POLICY_NAIVE_DFS, POLICY_GREEDY_DFS, \
    POLICY_NAIVE_BFS, POLICY_GREEDY_BFS, \
    POLICY_REPLAY, POLICY_MEMORY_GUIDED, \
    POLICY_MANUAL, POLICY_MONKEY, POLICY_NONE, POLICY_TASK
from .input_script import DroidBotScript
from .input_policy2 import MemoryGuidedPolicy
13,475
DEFAULT_POLICY = POLICY_GREEDY_DFS
DEFAULT_EVENT_INTERVAL = 1
DEFAULT_EVENT_COUNT = 100000000
DEFAULT_TIMEOUT = -1


class UnknownInputException(Exception):
    pass


class InputManager(object):
    """
    This class manages all events to send during app running
    """

    def __init__(self, device, app, task, policy_name, random_input,
                 event_count, event_interval,
                 script_path=None, profiling_method=None, master=None,
                 replay_output=None):
        """
        manage input event sent to the target device
        :param device: instance of Device
        :param app: instance of App
        :param policy_name: policy of generating events, string
        :return:
        """
        self.logger = logging.getLogger('InputEventManager')
        self.enabled = True
        self.device = device
        self.app = app
        self.task = task
        self.policy_name = policy_name
        self.random_input = random_input
        self.events = []
        self.policy = None
        self.script = None
        self.event_count = event_count
        self.event_interval = event_interval
        self.replay_output = replay_output
        self.monkey = None
        if script_path is not None:
            f = open(script_path, 'r')
            script_dict = json.load(f)
            self.script = DroidBotScript(script_dict)
        self.policy = self.get_input_policy(device, app, master)
        self.profiling_method = profiling_method

    def get_input_policy(self, device, app, master):
        if self.policy_name == POLICY_NONE:
            input_policy = None
        elif self.policy_name == POLICY_MONKEY:
            input_policy = None
        elif self.policy_name in [POLICY_NAIVE_DFS, POLICY_NAIVE_BFS]:
            input_policy = UtgNaiveSearchPolicy(device, app, self.random_input, self.policy_name)
        elif self.policy_name in [POLICY_GREEDY_DFS, POLICY_GREEDY_BFS]:
DEFAULT_POLICY = POLICY_GREEDY_DFS
DEFAULT_EVENT_INTERVAL = 1
DEFAULT_EVENT_COUNT = 100000000
DEFAULT_TIMEOUT = -1


class UnknownInputException(Exception):
    pass


class InputManager(object):
    """
    This class manages all events to send during app running
    """

    def __init__(self, device, app, task, policy_name, random_input,
                 event_count, event_interval,
                 script_path=None, profiling_method=None, master=None,
                 replay_output=None):
        """
        manage input event sent to the target device
        :param device: instance of Device
        :param app: instance of App
        :param policy_name: policy of generating events, string
        :return:
        """
        self.logger = logging.getLogger('InputEventManager')
        self.enabled = True
        self.device = device
        self.app = app
        self.task = task
        self.policy_name = policy_name
        self.random_input = random_input
        self.events = []
        self.policy = None
        self.script = None
        self.event_count = event_count
        self.event_interval = event_interval
        self.replay_output = replay_output
        self.monkey = None
        if script_path is not None:
            f = open(script_path, 'r')
            script_dict = json.load(f)
            self.script = DroidBotScript(script_dict)
        self.policy = self.get_input_policy(device, app, master)
        self.profiling_method = profiling_method

    def get_input_policy(self, device, app, master):
        if self.policy_name == POLICY_NONE:
            input_policy = None
        elif self.policy_name == POLICY_MONKEY:
            input_policy = None
        elif self.policy_name in [POLICY_NAIVE_DFS, POLICY_NAIVE_BFS]:
            input_policy = UtgNaiveSearchPolicy(device, app, self.random_input, self.policy_name)
        elif self.policy_name in [POLICY_GREEDY_DFS, POLICY_GREEDY_BFS]:
input_policy = UtgGreedySearchPolicy(device, app, self.random_input, self.policy_name)
3
2023-10-23 03:32:58+00:00
16k
openvpi/SingingVocoders
training/univnet_nsf_msd.py
[ { "identifier": "MultiScaleDiscriminator", "path": "models/nsf_HiFigan/models.py", "snippet": "class MultiScaleDiscriminator(torch.nn.Module):\n def __init__(self):\n super(MultiScaleDiscriminator, self).__init__()\n self.discriminators = nn.ModuleList(\n [\n DiscriminatorS(use_spectral_norm=True),\n DiscriminatorS(),\n DiscriminatorS(),\n ]\n )\n self.meanpools = nn.ModuleList(\n [AvgPool1d(4, 2, padding=2), AvgPool1d(4, 2, padding=2)]\n )\n\n def forward(self, y):\n y_d_rs = []\n\n fmap_rs = []\n\n for i, d in enumerate(self.discriminators):\n if i != 0:\n y = self.meanpools[i - 1](y)\n\n y_d_r, fmap_r = d(y)\n\n y_d_rs.append(y_d_r)\n fmap_rs.append(fmap_r)\n\n\n return y_d_rs, fmap_rs," }, { "identifier": "nsfUnivNet", "path": "models/nsf_univnet/nsfunivnet.py", "snippet": "class nsfUnivNet(torch.nn.Module):\n \"\"\"Parallel WaveGAN Generator module.\"\"\"\n\n def __init__(self, h, use_weight_norm=True):\n\n super().__init__()\n\n # self.ddsp=DDSP(h)\n self.m_source = SourceModuleHnNSF(\n sampling_rate=h['audio_sample_rate'],\n harmonic_num=8\n )\n self.upp = int(np.prod(h['hop_size']))\n\n\n\n in_channels = h['model_args']['cond_in_channels']\n out_channels = h['model_args']['out_channels']\n inner_channels = h['model_args']['cg_channels']\n cond_channels = h['audio_num_mel_bins']\n upsample_ratios = h['model_args']['upsample_rates']\n lvc_layers_each_block = h['model_args']['num_lvc_blocks']\n lvc_kernel_size = h['model_args']['lvc_kernels']\n kpnet_hidden_channels = h['model_args']['lvc_hidden_channels']\n kpnet_conv_size = h['model_args']['lvc_conv_size']\n dropout = h['model_args']['dropout']\n # upsample_ratios:list\n self.ddspd = ddsp_down(dims=inner_channels,downs=upsample_ratios.copy(),)\n\n upmel=h['model_args'].get('upmel')\n self.upblocke=torch.nn.Sequential(*[Upspamper() for i in range(upmel//2)]) if upmel is not None or upmel==1 else torch.nn.Identity()\n\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.cond_channels = cond_channels\n self.lvc_block_nums = len(upsample_ratios)\n\n # define first convolution\n self.first_conv = torch.nn.Conv1d(in_channels, inner_channels,\n kernel_size=7, padding=(7 - 1) // 2,\n dilation=1, bias=True)\n\n # define residual blocks\n self.lvc_blocks = torch.nn.ModuleList()\n cond_hop_length = 1\n for n in range(self.lvc_block_nums):\n cond_hop_length = cond_hop_length * upsample_ratios[n]\n lvcb = LVCBlock(\n in_channels=inner_channels,\n cond_channels=cond_channels,\n upsample_ratio=upsample_ratios[n],\n conv_layers=lvc_layers_each_block,\n conv_kernel_size=lvc_kernel_size,\n cond_hop_length=cond_hop_length,\n kpnet_hidden_channels=kpnet_hidden_channels,\n kpnet_conv_size=kpnet_conv_size,\n kpnet_dropout=dropout,\n )\n self.lvc_blocks += [lvcb]\n\n # define output layers\n self.last_conv_layers = torch.nn.ModuleList([\n torch.nn.Conv1d(inner_channels, out_channels, kernel_size=7, padding=(7 - 1) // 2,\n dilation=1, bias=True),\n\n ])\n\n # apply weight norm\n if use_weight_norm:\n self.apply_weight_norm()\n\n def forward(self, x, c,f0,infer=False):\n \"\"\"Calculate forward propagation.\n Args:\n x (Tensor): Input noise signal (B, 1, T).\n c (Tensor): Local conditioning auxiliary features (B, C ,T').\n Returns:\n Tensor: Output tensor (B, out_channels, T)\n \"\"\"\n pass\n # ddspwav,s_h,s_n=self.ddsp(mel=c,f0=f0,infer=infer)\n har_source = self.m_source(f0, self.upp).transpose(1, 2)\n specl=self.ddspd(har_source)\n\n x = self.first_conv(x)\n c=self.upblocke(c)\n\n for n in range(self.lvc_block_nums):\n x = 
self.lvc_blocks[n](x, c,specl[n])\n\n # apply final layers\n for f in self.last_conv_layers:\n x = F.leaky_relu(x, LRELU_SLOPE)\n x = f(x)\n x = torch.tanh(x)\n return x,har_source\n\n def remove_weight_norm(self):\n \"\"\"Remove weight normalization module from all of the layers.\"\"\"\n def _remove_weight_norm(m):\n try:\n logging.debug(f\"Weight norm is removed from {m}.\")\n torch.nn.utils.remove_weight_norm(m)\n except ValueError: # this module didn't have weight norm\n return\n\n self.apply(_remove_weight_norm)\n\n def apply_weight_norm(self):\n \"\"\"Apply weight normalization module from all of the layers.\"\"\"\n def _apply_weight_norm(m):\n if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.Conv2d):\n torch.nn.utils.weight_norm(m)\n logging.debug(f\"Weight norm is applied to {m}.\")\n\n self.apply(_apply_weight_norm)\n\n @staticmethod\n def _get_receptive_field_size(layers, stacks, kernel_size,\n dilation=lambda x: 2 ** x):\n assert layers % stacks == 0\n layers_per_cycle = layers // stacks\n dilations = [dilation(i % layers_per_cycle) for i in range(layers)]\n return (kernel_size - 1) * sum(dilations) + 1\n\n @property\n def receptive_field_size(self):\n \"\"\"Return receptive field size.\"\"\"\n return self._get_receptive_field_size(self.layers, self.stacks, self.kernel_size)\n\n def inference(self, c=None, x=None):\n \"\"\"Perform inference.\n Args:\n c (Union[Tensor, ndarray]): Local conditioning auxiliary features (T' ,C).\n x (Union[Tensor, ndarray]): Input noise signal (T, 1).\n Returns:\n Tensor: Output tensor (T, out_channels)\n \"\"\"\n if x is not None:\n if not isinstance(x, torch.Tensor):\n x = torch.tensor(x, dtype=torch.float).to(next(self.parameters()).device)\n x = x.transpose(1, 0).unsqueeze(0)\n else:\n assert c is not None\n x = torch.randn(1, 1, len(c) * self.upsample_factor).to(next(self.parameters()).device)\n if c is not None:\n if not isinstance(c, torch.Tensor):\n c = torch.tensor(c, dtype=torch.float).to(next(self.parameters()).device)\n c = c.transpose(1, 0).unsqueeze(0)\n c = torch.nn.ReplicationPad1d(self.aux_context_window)(c)\n return self.forward(x, c).squeeze(0).transpose(1, 0)" }, { "identifier": "nsf_univloss", "path": "modules/loss/nsf_univloss.py", "snippet": "class nsf_univloss(nn.Module):\n def __init__(self, config: dict):\n super().__init__()\n self.mel = PitchAdjustableMelSpectrogram(sample_rate=config['audio_sample_rate'],\n n_fft=config['fft_size'],\n win_length=config['win_size'],\n hop_length=config['hop_size'],\n f_min=config['fmin'],\n f_max=config['fmax_for_loss'],\n n_mels=config['audio_num_mel_bins'], )\n self.L1loss = nn.L1Loss()\n self.labauxloss = config.get('lab_aux_loss', 45)\n self.labddsploss=config.get('lab_ddsp_loss', 2)\n # self.stft=warp_stft({'fft_sizes':[1024, 2048, 512,],'hop_sizes':[120, 240, 50,],'win_lengths':[600, 1200, 240,]})\n\n # self.stft = warp_stft(\n # {'fft_sizes': [2048, 2048, 4096, 1024, 512, 256, 128], 'hop_sizes': [512, 240, 480, 100, 50, 25, 12],\n # 'win_lengths': [2048, 1200, 2400, 480, 240, 120, 60]})\n self.stft = warp_stft({'fft_sizes': config['loss_fft_sizes'], 'hop_sizes': config['loss_hop_sizes'],\n 'win_lengths': config['loss_win_lengths']})\n\n self.deuv = config.get('detuv', 2000)\n\n # self.ddsploss = HybridLoss(block_size=config['hop_size'], fft_min=config['ddsp_fftmin'],\n # fft_max=config['ddsp_fftmax'], n_scale=config['ddsp_nscale'],\n # lambda_uv=config['ddsp_lambdauv'], device='cuda')\n # fft_sizes = [2048, 4096, 1024, 512, 256, 128],\n # hop_sizes = [240, 480, 100, 50, 
25, 12],\n # win_lengths = [1200, 2400, 480, 240, 120, 60]\n\n def discriminator_loss(self, disc_real_outputs, disc_generated_outputs):\n loss = 0\n rlosses = 0\n glosses = 0\n r_losses = []\n g_losses = []\n\n for dr, dg in zip(disc_real_outputs, disc_generated_outputs):\n r_loss = torch.mean((1 - dr) ** 2)\n g_loss = torch.mean(dg ** 2)\n loss += r_loss + g_loss\n rlosses += r_loss.item()\n glosses += g_loss.item()\n r_losses.append(r_loss.item())\n g_losses.append(g_loss.item())\n\n return loss, rlosses, glosses, r_losses, g_losses\n\n def Dloss(self, Dfake, Dtrue):\n\n (Fmrd_out, _), (Fmpd_out, _) = Dfake\n (Tmrd_out, _), (Tmpd_out, _) = Dtrue\n mrdloss, mrdrlosses, mrdglosses, _, _ = self.discriminator_loss(Tmrd_out, Fmrd_out)\n mpdloss, mpdrlosses, mpdglosses, _, _ = self.discriminator_loss(Tmpd_out, Fmpd_out)\n loss = mrdloss + mpdloss\n return loss, {'DmrdlossF': mrdglosses, 'DmrdlossT': mrdrlosses, 'DmpdlossT': mpdrlosses,\n 'DmpdlossF': mpdglosses}\n\n def feature_loss(self, fmap_r, fmap_g):\n loss = 0\n for dr, dg in zip(fmap_r, fmap_g):\n for rl, gl in zip(dr, dg):\n loss += torch.mean(torch.abs(rl - gl))\n\n return loss * 2\n\n def GDloss(self, GDfake, GDtrue):\n loss = 0\n gen_losses = []\n mrd_losses = 0\n mpd_losses = 0\n (mrd_out, Fmrd_feature), (mpd_out, Fmpd_feature) = GDfake\n (_, Tmrd_feature), (_, Tmpd_feature) = GDtrue\n for dg in mrd_out:\n l = torch.mean((1 - dg) ** 2)\n gen_losses.append(l.item())\n # loss += l\n mrd_losses = l + mrd_losses\n\n for dg in mpd_out:\n l = torch.mean((1 - dg) ** 2)\n gen_losses.append(l.item())\n # loss += l\n mpd_losses = l + mpd_losses\n\n mrd_feature_loss = self.feature_loss(Tmrd_feature, Fmrd_feature)\n mpd_feature_loss = self.feature_loss(Tmpd_feature, Fmpd_feature)\n # loss +=msd_feature_loss\n # loss +=mpd_feature_loss\n loss = mpd_feature_loss + mpd_losses + mrd_losses#+mrd_feature_loss\n # (msd_losses, mpd_losses), (msd_feature_loss, mpd_feature_loss), gen_losses\n return loss, {'Gmrdloss': mrd_losses, 'Gmpdloss': mpd_losses, 'Gmrd_feature_loss': mrd_feature_loss,\n 'Gmpd_feature_loss': mpd_feature_loss}\n\n # def Auxloss(self,Goutput, sample):\n #\n # Gmel=self.mel.dynamic_range_compression_torch(self.mel(Goutput['audio'].squeeze(1)))\n # # Rmel=sample['mel']\n # Rmel = self.mel.dynamic_range_compression_torch(self.mel(sample['audio'].squeeze(1)))\n # loss=self.L1loss(Gmel, Rmel)*self.labauxloss\n # return loss,{'auxloss':loss}\n\n def Auxloss(self, Goutput, sample, step):\n\n # Gmel=self.mel.dynamic_range_compression_torch(self.mel(Goutput['audio'].squeeze(1)))\n # # Rmel=sample['mel']\n # Rmel = self.mel.dynamic_range_compression_torch(self.mel(sample['audio'].squeeze(1)))\n detach_uv = False\n if step < self.deuv:\n detach_uv = True\n\n #\n # lossddsp, (loss_rss, loss_uv) = self.ddsploss(Goutput['ddspwav'].squeeze(1), Goutput['s_h'],\n # sample['audio'].squeeze(1),sample['uv'].float(),\n # detach_uv=detach_uv,\n # uv_tolerance=0.15)\n\n # lossddsp=0\n # loss_rss=0\n # loss_uv=0\n\n\n sc_loss, mag_loss = self.stft.stft(Goutput['audio'].squeeze(1), sample['audio'].squeeze(1))\n loss = (sc_loss + mag_loss) * self.labauxloss\n return loss, {'auxloss': loss, 'auxloss_sc_loss': sc_loss, 'auxloss_mag_loss': mag_loss,}" }, { "identifier": "nsf_univloss_msd", "path": "modules/loss/nsf_univloss_msd.py", "snippet": "class nsf_univloss_msd(nn.Module):\n def __init__(self, config: dict):\n super().__init__()\n self.mel = PitchAdjustableMelSpectrogram(sample_rate=config['audio_sample_rate'],\n n_fft=config['fft_size'],\n 
win_length=config['win_size'],\n hop_length=config['hop_size'],\n f_min=config['fmin'],\n f_max=config['fmax_for_loss'],\n n_mels=config['audio_num_mel_bins'], )\n self.L1loss = nn.L1Loss()\n self.labauxloss = config.get('lab_aux_loss', 45)\n self.labddsploss=config.get('lab_ddsp_loss', 2)\n # self.stft=warp_stft({'fft_sizes':[1024, 2048, 512,],'hop_sizes':[120, 240, 50,],'win_lengths':[600, 1200, 240,]})\n\n # self.stft = warp_stft(\n # {'fft_sizes': [2048, 2048, 4096, 1024, 512, 256, 128], 'hop_sizes': [512, 240, 480, 100, 50, 25, 12],\n # 'win_lengths': [2048, 1200, 2400, 480, 240, 120, 60]})\n self.stft = warp_stft({'fft_sizes': config['loss_fft_sizes'], 'hop_sizes': config['loss_hop_sizes'],\n 'win_lengths': config['loss_win_lengths']})\n\n self.deuv = config.get('detuv', 2000)\n\n # self.ddsploss = HybridLoss(block_size=config['hop_size'], fft_min=config['ddsp_fftmin'],\n # fft_max=config['ddsp_fftmax'], n_scale=config['ddsp_nscale'],\n # lambda_uv=config['ddsp_lambdauv'], device='cuda')\n # fft_sizes = [2048, 4096, 1024, 512, 256, 128],\n # hop_sizes = [240, 480, 100, 50, 25, 12],\n # win_lengths = [1200, 2400, 480, 240, 120, 60]\n\n def discriminator_loss(self, disc_real_outputs, disc_generated_outputs):\n loss = 0\n rlosses = 0\n glosses = 0\n r_losses = []\n g_losses = []\n\n for dr, dg in zip(disc_real_outputs, disc_generated_outputs):\n r_loss = torch.mean((1 - dr) ** 2)\n g_loss = torch.mean(dg ** 2)\n loss += r_loss + g_loss\n rlosses += r_loss.item()\n glosses += g_loss.item()\n r_losses.append(r_loss.item())\n g_losses.append(g_loss.item())\n\n return loss, rlosses, glosses, r_losses, g_losses\n\n def Dloss(self, Dfake, Dtrue):\n\n (Fmrd_out, _), (Fmpd_out, _) = Dfake\n (Tmrd_out, _), (Tmpd_out, _) = Dtrue\n mrdloss, mrdrlosses, mrdglosses, _, _ = self.discriminator_loss(Tmrd_out, Fmrd_out)\n mpdloss, mpdrlosses, mpdglosses, _, _ = self.discriminator_loss(Tmpd_out, Fmpd_out)\n loss = mrdloss + mpdloss\n return loss, {'DmrdlossF': mrdglosses, 'DmrdlossT': mrdrlosses, 'DmpdlossT': mpdrlosses,\n 'DmpdlossF': mpdglosses}\n\n def feature_loss(self, fmap_r, fmap_g):\n loss = 0\n for dr, dg in zip(fmap_r, fmap_g):\n for rl, gl in zip(dr, dg):\n loss += torch.mean(torch.abs(rl - gl))\n\n return loss * 2\n\n def GDloss(self, GDfake, GDtrue):\n loss = 0\n gen_losses = []\n mrd_losses = 0\n mpd_losses = 0\n (mrd_out, Fmrd_feature), (mpd_out, Fmpd_feature) = GDfake\n (_, Tmrd_feature), (_, Tmpd_feature) = GDtrue\n for dg in mrd_out:\n l = torch.mean((1 - dg) ** 2)\n gen_losses.append(l.item())\n # loss += l\n mrd_losses = l + mrd_losses\n\n for dg in mpd_out:\n l = torch.mean((1 - dg) ** 2)\n gen_losses.append(l.item())\n # loss += l\n mpd_losses = l + mpd_losses\n\n mrd_feature_loss = self.feature_loss(Tmrd_feature, Fmrd_feature)\n mpd_feature_loss = self.feature_loss(Tmpd_feature, Fmpd_feature)\n # loss +=msd_feature_loss\n # loss +=mpd_feature_loss\n loss = mpd_feature_loss + mpd_losses + mrd_losses+mrd_feature_loss\n # (msd_losses, mpd_losses), (msd_feature_loss, mpd_feature_loss), gen_losses\n return loss, {'Gmrdloss': mrd_losses, 'Gmpdloss': mpd_losses, 'Gmrd_feature_loss': mrd_feature_loss,\n 'Gmpd_feature_loss': mpd_feature_loss}\n\n # def Auxloss(self,Goutput, sample):\n #\n # Gmel=self.mel.dynamic_range_compression_torch(self.mel(Goutput['audio'].squeeze(1)))\n # # Rmel=sample['mel']\n # Rmel = self.mel.dynamic_range_compression_torch(self.mel(sample['audio'].squeeze(1)))\n # loss=self.L1loss(Gmel, Rmel)*self.labauxloss\n # return loss,{'auxloss':loss}\n\n def Auxloss(self, 
Goutput, sample, step):\n\n # Gmel=self.mel.dynamic_range_compression_torch(self.mel(Goutput['audio'].squeeze(1)))\n # # Rmel=sample['mel']\n # Rmel = self.mel.dynamic_range_compression_torch(self.mel(sample['audio'].squeeze(1)))\n detach_uv = False\n if step < self.deuv:\n detach_uv = True\n\n #\n # lossddsp, (loss_rss, loss_uv) = self.ddsploss(Goutput['ddspwav'].squeeze(1), Goutput['s_h'],\n # sample['audio'].squeeze(1),sample['uv'].float(),\n # detach_uv=detach_uv,\n # uv_tolerance=0.15)\n\n # lossddsp=0\n # loss_rss=0\n # loss_uv=0\n\n\n sc_loss, mag_loss = self.stft.stft(Goutput['audio'].squeeze(1), sample['audio'].squeeze(1))\n loss = (sc_loss + mag_loss) * self.labauxloss\n return loss, {'auxloss': loss, 'auxloss_sc_loss': sc_loss, 'auxloss_mag_loss': mag_loss,}" }, { "identifier": "MultiPeriodDiscriminator", "path": "modules/univ_D/discriminator.py", "snippet": "class MultiPeriodDiscriminator(torch.nn.Module):\n def __init__(self,periods=[2,3,5,7,11]):\n super(MultiPeriodDiscriminator, self).__init__()\n # self.discriminators = nn.ModuleList([\n # DiscriminatorP(2),\n # DiscriminatorP(3),\n # DiscriminatorP(5),\n # DiscriminatorP(7),\n # DiscriminatorP(11),\n # ])\n self.discriminators = nn.ModuleList([\n DiscriminatorP(i) for i in periods\n\n ])\n\n def forward(self, y,):\n y_d_rs = []\n\n fmap_rs = []\n\n for i, d in enumerate(self.discriminators):\n y_d_r, fmap_r = d(y)\n\n y_d_rs.append(y_d_r)\n fmap_rs.append(fmap_r)\n\n\n return y_d_rs, fmap_rs" }, { "identifier": "MultiResSpecDiscriminator", "path": "modules/univ_D/discriminator.py", "snippet": "class MultiResSpecDiscriminator(torch.nn.Module):\n\n def __init__(self,\n fft_sizes=[1024, 2048, 512],\n hop_sizes=[120, 240, 50],\n win_lengths=[600, 1200, 240],\n window=\"hann_window\"):\n\n super(MultiResSpecDiscriminator, self).__init__()\n # self.discriminators = nn.ModuleList([\n # SpecDiscriminator(fft_sizes[0], hop_sizes[0], win_lengths[0], window),\n # SpecDiscriminator(fft_sizes[1], hop_sizes[1], win_lengths[1], window),\n # SpecDiscriminator(fft_sizes[2], hop_sizes[2], win_lengths[2], window)\n # ])\n self.discriminators = nn.ModuleList([\n SpecDiscriminator(i[0], i[1], i[2], window) for i in zip(fft_sizes,hop_sizes,win_lengths)\n\n ])\n\n def forward(self, y,):\n y_d_rs = []\n\n fmap_rs = []\n\n for i, d in enumerate(self.discriminators):\n y_d_r, fmap_r = d(y)\n\n y_d_rs.append(y_d_r)\n fmap_rs.append(fmap_r)\n\n return y_d_rs, fmap_rs" }, { "identifier": "GanBaseTask", "path": "training/base_task_gan.py", "snippet": "class GanBaseTask(pl.LightningModule):\n \"\"\"\n Base class for training tasks.\n 1. *load_ckpt*:\n load checkpoint;\n 2. *training_step*:\n record and log the loss;\n 3. *optimizer_step*:\n run backwards step;\n 4. *start*:\n load training configs, backup code, log to tensorboard, start training;\n 5. *configure_ddp* and *init_ddp_connection*:\n start parallel training.\n\n Subclasses should define:\n 1. *build_model*, *build_optimizer*, *build_scheduler*:\n how to build the model, the optimizer and the training scheduler;\n 2. *_training_step*:\n one training step of the model;\n 3. 
*on_validation_end* and *_on_validation_end*:\n postprocess the validation output.\n \"\"\"\n\n def __init__(self, config: dict, *args, **kwargs):\n # dataset configs\n super().__init__(*args, **kwargs)\n self.dataset_cls = None\n self.config = config\n # self.max_batch_frames = self.config['max_batch_frames']\n # self.max_batch_size = self.config['max_batch_size']\n # self.max_val_batch_frames = self.config['max_val_batch_frames']\n # self.max_val_batch_size = self.config['max_val_batch_size']\n\n # self.accumulate_grad_batches = self.config['accumulate_grad_batches']\n self.clip_grad_norm = self.config['clip_grad_norm']\n\n self.training_sampler = None\n self.model = None\n self.generator = None\n self.discriminator = None\n self.skip_immediate_validation = False\n self.skip_immediate_ckpt_save = False\n\n self.valid_losses: Dict[str, Metric] = {\n 'total_loss': MeanMetric()\n }\n self.valid_metric_names = set()\n self.mix_loss = None\n\n self.automatic_optimization = False\n self.skip_immediate_validations = 0\n\n self.aux_step = self.config.get('aux_step')\n self.train_dataset = None\n self.valid_dataset = None\n\n ###########\n\n # Training, validation and testing\n ###########\n def setup(self, stage):\n self.model = self.build_model()\n self.unfreeze_all_params()\n if self.config['freezing_enabled']:\n self.freeze_params()\n if self.config['finetune_enabled'] and get_latest_checkpoint_path(\n pathlib.Path(self.config['work_dir'])) is None:\n self.load_finetune_ckpt(self.load_pre_train_model())\n self.print_arch()\n self.build_losses_and_metrics()\n self.build_dataset()\n # self.train_dataset = self.dataset_cls(\n # config=self.config, data_dir=self.config['binary_data_dir'],\n # prefix=self.config['train_set_name'], allow_aug=True\n # )\n # self.valid_dataset = self.dataset_cls(\n # config=self.config, data_dir=self.config['binary_data_dir'],\n # prefix=self.config['valid_set_name'], allow_aug=False\n # )\n\n def build_dataset(self):\n raise NotImplementedError()\n\n def get_need_freeze_state_dict_key(self, model_state_dict) -> list:\n key_list = []\n for i in self.config['frozen_params']:\n for j in model_state_dict:\n if j.startswith(i):\n key_list.append(j)\n return list(set(key_list))\n\n def freeze_params(self) -> None:\n model_state_dict = self.state_dict().keys()\n freeze_key = self.get_need_freeze_state_dict_key(model_state_dict=model_state_dict)\n\n for i in freeze_key:\n params = self.get_parameter(i)\n\n params.requires_grad = False\n\n def unfreeze_all_params(self) -> None:\n for i in self.parameters():\n i.requires_grad = True\n\n def load_finetune_ckpt(\n self, state_dict\n ) -> None:\n\n adapt_shapes = self.config['finetune_strict_shapes']\n if not adapt_shapes:\n cur_model_state_dict = self.state_dict()\n unmatched_keys = []\n for key, param in state_dict.items():\n if key in cur_model_state_dict:\n new_param = cur_model_state_dict[key]\n if new_param.shape != param.shape:\n unmatched_keys.append(key)\n print('| Unmatched keys: ', key, new_param.shape, param.shape)\n for key in unmatched_keys:\n del state_dict[key]\n self.load_state_dict(state_dict, strict=False)\n\n def load_pre_train_model(self):\n\n pre_train_ckpt_path = self.config.get('finetune_ckpt_path')\n blacklist = self.config.get('finetune_ignored_params')\n if blacklist is None:\n blacklist = []\n # if whitelist is None:\n # raise RuntimeError(\"\")\n\n if pre_train_ckpt_path is not None:\n ckpt = torch.load(pre_train_ckpt_path)\n\n state_dict = {}\n for i in ckpt['state_dict']:\n # if 'diffusion' in i:\n 
# if i in rrrr:\n # continue\n skip = False\n for b in blacklist:\n if i.startswith(b):\n skip = True\n break\n\n if skip:\n continue\n\n state_dict[i] = ckpt['state_dict'][i]\n print(i)\n return state_dict\n else:\n raise RuntimeError(\"\")\n\n def build_model(self):\n raise NotImplementedError()\n\n @rank_zero_only\n def print_arch(self):\n utils.print_arch(self)\n\n def build_losses_and_metrics(self):\n raise NotImplementedError()\n\n def register_metric(self, name: str, metric: Metric):\n assert isinstance(metric, Metric)\n setattr(self, name, metric)\n self.valid_metric_names.add(name)\n\n # def run_model(self, sample, infer=False):\n # \"\"\"\n # steps:\n # 1. run the full model\n # 2. calculate losses if not infer\n # \"\"\"\n # raise NotImplementedError()\n\n def Gforward(self, sample, infer=False):\n \"\"\"\n steps:\n 1. run the full model\n 2. calculate losses if not infer\n \"\"\"\n raise NotImplementedError()\n\n def Dforward(self, Goutput):\n \"\"\"\n steps:\n 1. run the full model\n 2. calculate losses if not infer\n \"\"\"\n raise NotImplementedError()\n\n # def on_train_epoch_start(self):\n # if self.training_sampler is not None:\n # self.training_sampler.set_epoch(self.current_epoch)\n\n def _training_step(self, sample, batch_idx):\n \"\"\"\n :return: total loss: torch.Tensor, loss_log: dict, other_log: dict\n\n \"\"\"\n aux_only = False\n if self.aux_step is not None:\n if self.aux_step > self.global_step:\n aux_only = True\n\n log_diet = {}\n opt_g, opt_d = self.optimizers()\n Goutput = self.Gforward(sample=sample)\n if not aux_only:\n Dfake = self.Dforward(Goutput=Goutput['audio'].detach())\n Dtrue = self.Dforward(Goutput=sample['audio'])\n Dloss, Dlog = self.mix_loss.Dloss(Dfake=Dfake, Dtrue=Dtrue)\n log_diet.update(Dlog)\n # if self.clip_grad_norm is not None:\n # self.manual_backward(Dloss/self.clip_grad_norm)\n # else:\n opt_d.zero_grad()\n self.manual_backward(Dloss)\n if self.clip_grad_norm is not None:\n self.clip_gradients(opt_d, gradient_clip_val=self.clip_grad_norm, gradient_clip_algorithm=\"norm\")\n opt_d.step()\n opt_d.zero_grad()\n if not aux_only:\n GDfake = self.Dforward(Goutput=Goutput['audio'])\n GDtrue = self.Dforward(Goutput=sample['audio'])\n GDloss, GDlog = self.mix_loss.GDloss(GDfake=GDfake,GDtrue=GDtrue)\n log_diet.update(GDlog)\n Auxloss, Auxlog = self.mix_loss.Auxloss(Goutput=Goutput, sample=sample)\n\n log_diet.update(Auxlog)\n if not aux_only:\n Gloss=GDloss + Auxloss\n else:\n Gloss=Auxloss\n\n # if self.clip_grad_norm is not None:\n # self.manual_backward(Gloss / self.clip_grad_norm)\n # else:\n # self.manual_backward(Gloss)\n # if (batch_idx + 1) % self.accumulate_grad_batches == 0:\n opt_g.zero_grad()\n self.manual_backward(Gloss)\n if self.clip_grad_norm is not None:\n self.clip_gradients(opt_g, gradient_clip_val=self.clip_grad_norm, gradient_clip_algorithm=\"norm\")\n opt_g.step()\n\n\n\n return log_diet\n\n def training_step(self, sample, batch_idx, ): # todo\n log_outputs = self._training_step(sample, batch_idx)\n\n # logs to progress bar\n self.log_dict({'loss':sum(log_outputs.values())}, prog_bar=True, logger=False, on_step=True, on_epoch=False)\n # self.log('lr', self.lr_schedulers().get_last_lr()[0], prog_bar=True, logger=False, on_step=True, on_epoch=False)\n # logs to tensorboard\n if self.global_step % self.config['log_interval'] == 0:\n tb_log = {f'training/{k}': v for k, v in log_outputs.items()}\n # tb_log['training/lr'] = self.lr_schedulers().get_last_lr()[0]\n self.logger.log_metrics(tb_log, step=self.global_step)\n #\n # 
return total_loss\n\n # def on_before_optimizer_step(self, *args, **kwargs):\n # self.log_dict(grad_norm(self, norm_type=2))\n\n def _on_validation_start(self):\n pass\n\n def on_validation_start(self):\n self._on_validation_start()\n for metric in self.valid_losses.values():\n metric.to(self.device)\n metric.reset()\n\n def _validation_step(self, sample, batch_idx):\n \"\"\"\n\n :param sample:\n :param batch_idx:\n :return: loss_log: dict, weight: int\n \"\"\"\n raise NotImplementedError()\n\n def validation_step(self, sample, batch_idx):\n \"\"\"\n\n :param sample:\n :param batch_idx:\n\n \"\"\"\n\n # if self.skip_immediate_validations == 0 and self.global_step != 0:\n # self.skip_immediate_validation = True\n # self.skip_immediate_validations = 1\n # if self.global_step == 0:\n # self.skip_immediate_validations = 1\n\n if self.skip_immediate_validation:\n rank_zero_debug(f\"Skip validation {batch_idx}\")\n return {}\n with torch.autocast(self.device.type, enabled=False):\n losses, weight = self._validation_step(sample, batch_idx)\n losses = {\n 'total_loss': sum(losses.values()),\n **losses\n }\n for k, v in losses.items():\n if k not in self.valid_losses:\n self.valid_losses[k] = MeanMetric().to(self.device)\n self.valid_losses[k].update(v, weight=weight) # weight=1\n return losses\n\n def on_validation_epoch_end(self):\n if self.skip_immediate_validation:\n self.skip_immediate_validation = False\n self.skip_immediate_ckpt_save = True\n return\n loss_vals = {k: v.compute() for k, v in self.valid_losses.items()}\n self.log('val_loss', loss_vals['total_loss'], on_epoch=True, prog_bar=True, logger=False, sync_dist=True)\n self.logger.log_metrics({f'validation/{k}': v for k, v in loss_vals.items()}, step=self.global_step)\n for metric in self.valid_losses.values():\n metric.reset()\n metric_vals = {k: getattr(self, k).compute() for k in self.valid_metric_names}\n self.logger.log_metrics({f'metrics/{k}': v for k, v in metric_vals.items()}, step=self.global_step)\n for metric_name in self.valid_metric_names:\n getattr(self, metric_name).reset()\n\n # noinspection PyMethodMayBeStatic\n def build_scheduler(self, optimizer):\n from utils import build_lr_scheduler_from_config\n\n scheduler_args = self.config['lr_scheduler_args']\n assert scheduler_args['scheduler_cls'] != ''\n scheduler = build_lr_scheduler_from_config(optimizer, scheduler_args)\n return scheduler\n\n # noinspection PyMethodMayBeStatic\n def build_optimizer(self, model, optimizer_args):\n from utils import build_object_from_class_name\n\n assert optimizer_args['optimizer_cls'] != ''\n if 'beta1' in optimizer_args and 'beta2' in optimizer_args and 'betas' not in optimizer_args:\n optimizer_args['betas'] = (optimizer_args['beta1'], optimizer_args['beta2'])\n\n if isinstance(model, nn.ModuleList):\n parameterslist = []\n for i in model:\n parameterslist = parameterslist + list(i.parameters())\n optimizer = build_object_from_class_name(\n optimizer_args['optimizer_cls'],\n torch.optim.Optimizer,\n parameterslist,\n **optimizer_args\n )\n elif isinstance(model, nn.ModuleDict):\n parameterslist = []\n for i in model:\n # parameterslist = parameterslist + list(model[i].parameters())\n parameterslist.append({'params': model[i].parameters()})\n optimizer = build_object_from_class_name(\n optimizer_args['optimizer_cls'],\n torch.optim.Optimizer,\n parameterslist,\n **optimizer_args\n )\n elif isinstance(model, nn.Module):\n\n optimizer = build_object_from_class_name(\n optimizer_args['optimizer_cls'],\n torch.optim.Optimizer,\n 
model.parameters(),\n **optimizer_args\n )\n else:\n raise RuntimeError(\"\")\n\n return optimizer\n\n def configure_optimizers(self):\n optG = self.build_optimizer(self.generator, optimizer_args=self.config['generater_optimizer_args'])\n optD = self.build_optimizer(self.discriminator, optimizer_args=self.config['discriminate_optimizer_args'])\n\n return [optG, optD]\n # scheduler = self.build_scheduler(optm)\n # if scheduler is None:\n # return optm\n # return {\n # \"optimizer\": optm,\n # \"lr_scheduler\": {\n # \"scheduler\": scheduler,\n # \"interval\": \"step\",\n # \"frequency\": 1\n # }\n # }\n\n def train_dataloader(self):\n # self.training_sampler = DsBatchSampler(\n # self.train_dataset,\n # max_batch_frames=self.max_batch_frames,\n # max_batch_size=self.max_batch_size,\n # num_replicas=(self.trainer.distributed_sampler_kwargs or {}).get('num_replicas', 1),\n # rank=(self.trainer.distributed_sampler_kwargs or {}).get('rank', 0),\n # sort_by_similar_size=self.config['sort_by_len'],\n # required_batch_count_multiple=self.config['accumulate_grad_batches'],\n # frame_count_grid=self.config['sampler_frame_count_grid'],\n # shuffle_sample=True,\n # shuffle_batch=False,\n # seed=self.config['seed']\n # )\n return torch.utils.data.DataLoader(self.train_dataset,\n collate_fn=self.train_dataset.collater,\n batch_size=self.config['batch_size'],\n # batch_sampler=self.training_sampler,\n num_workers=self.config['ds_workers'],\n prefetch_factor=self.config['dataloader_prefetch_factor'],\n pin_memory=True,\n persistent_workers=True)\n\n def val_dataloader(self):\n # sampler = DsEvalBatchSampler(\n # self.valid_dataset,\n # max_batch_frames=self.max_val_batch_frames,\n # max_batch_size=self.max_val_batch_size,\n # rank=(self.trainer.distributed_sampler_kwargs or {}).get('rank', 0),\n # batch_by_size=False\n # )\n return torch.utils.data.DataLoader(self.valid_dataset,\n collate_fn=self.valid_dataset.collater,\n batch_size=1,\n # batch_sampler=sampler,\n num_workers=self.config['ds_workers'],\n prefetch_factor=self.config['dataloader_prefetch_factor'],\n shuffle=False)\n\n def test_dataloader(self):\n return self.val_dataloader()\n\n def on_test_start(self):\n self.on_validation_start()\n\n def test_step(self, sample, batch_idx):\n return self.validation_step(sample, batch_idx)\n\n def on_test_end(self):\n return self.on_validation_end()\n\n def on_save_checkpoint(self, checkpoint):\n pass\n # checkpoint['trainer_stage'] = self.trainer.state.stage.value\n\n # def on_load_checkpoint(self, checkpoint):\n # # from lightning.pytorch.trainer.states import RunningStage\n # from utils import simulate_lr_scheduler\n # # if checkpoint.get('trainer_stage', '') == RunningStage.VALIDATING.value:\n # # self.skip_immediate_validation = True\n #\n # optimizer_args = self.config['optimizer_args']\n # scheduler_args = self.config['lr_scheduler_args']\n #\n # if 'beta1' in optimizer_args and 'beta2' in optimizer_args and 'betas' not in optimizer_args:\n # optimizer_args['betas'] = (optimizer_args['beta1'], optimizer_args['beta2'])\n #\n # if checkpoint.get('optimizer_states', None):\n # opt_states = checkpoint['optimizer_states']\n # assert len(opt_states) == 1 # only support one optimizer\n # opt_state = opt_states[0]\n # for param_group in opt_state['param_groups']:\n # for k, v in optimizer_args.items():\n # if k in param_group and param_group[k] != v:\n # if 'lr_schedulers' in checkpoint and checkpoint['lr_schedulers'] and k == 'lr':\n # continue\n # rank_zero_info(f'| Overriding optimizer parameter {k} from 
checkpoint: {param_group[k]} -> {v}')\n # param_group[k] = v\n # if 'initial_lr' in param_group and param_group['initial_lr'] != optimizer_args['lr']:\n # rank_zero_info(\n # f'| Overriding optimizer parameter initial_lr from checkpoint: {param_group[\"initial_lr\"]} -> {optimizer_args[\"lr\"]}'\n # )\n # param_group['initial_lr'] = optimizer_args['lr']\n #\n # if checkpoint.get('lr_schedulers', None):\n # assert checkpoint.get('optimizer_states', False)\n # assert len(checkpoint['lr_schedulers']) == 1 # only support one scheduler\n # checkpoint['lr_schedulers'][0] = simulate_lr_scheduler(\n # optimizer_args, scheduler_args,\n # step_count=checkpoint['global_step'],\n # num_param_groups=len(checkpoint['optimizer_states'][0]['param_groups'])\n # )\n # for param_group, new_lr in zip(\n # checkpoint['optimizer_states'][0]['param_groups'],\n # checkpoint['lr_schedulers'][0]['_last_lr'],\n # ):\n # if param_group['lr'] != new_lr:\n # rank_zero_info(\n # f'| Overriding optimizer parameter lr from checkpoint: {param_group[\"lr\"]} -> {new_lr}')\n # param_group['lr'] = new_lr" }, { "identifier": "PitchAdjustableMelSpectrogram", "path": "utils/wav2mel.py", "snippet": "class PitchAdjustableMelSpectrogram:\n def __init__(\n self,\n sample_rate=44100,\n n_fft=2048,\n win_length=2048,\n hop_length=512,\n f_min=40,\n f_max=16000,\n n_mels=128,\n center=False,\n ):\n self.sample_rate = sample_rate\n self.n_fft = n_fft\n self.win_size = win_length\n self.hop_length = hop_length\n self.f_min = f_min\n self.f_max = f_max\n self.n_mels = n_mels\n self.center = center\n\n self.mel_basis = {}\n self.hann_window = {}\n\n def __call__(self, y, key_shift=0, speed=1.0):\n factor = 2 ** (key_shift / 12)\n n_fft_new = int(np.round(self.n_fft * factor))\n win_size_new = int(np.round(self.win_size * factor))\n hop_length = int(np.round(self.hop_length * speed))\n\n # if torch.min(y) < -1.0:\n # logger.warning(f\"min value is {torch.min(y)}\")\n # if torch.max(y) > 1.0:\n # logger.warning(f\"max value is {torch.max(y)}\")\n\n mel_basis_key = f\"{self.f_max}_{y.device}\"\n if mel_basis_key not in self.mel_basis:\n mel = librosa_mel_fn(\n sr=self.sample_rate,\n n_fft=self.n_fft,\n n_mels=self.n_mels,\n fmin=self.f_min,\n fmax=self.f_max,\n )\n self.mel_basis[mel_basis_key] = torch.from_numpy(mel).float().to(y.device)\n\n hann_window_key = f\"{key_shift}_{y.device}\"\n if hann_window_key not in self.hann_window:\n self.hann_window[hann_window_key] = torch.hann_window(\n win_size_new, device=y.device\n )\n\n y = torch.nn.functional.pad(\n y.unsqueeze(1),\n (\n int((win_size_new - hop_length) // 2),\n int((win_size_new - hop_length+1) // 2),\n ),\n mode=\"reflect\",\n )\n y = y.squeeze(1)\n\n spec = torch.stft(\n y,\n n_fft_new,\n hop_length=hop_length,\n win_length=win_size_new,\n window=self.hann_window[hann_window_key],\n center=self.center,\n pad_mode=\"reflect\",\n normalized=False,\n onesided=True,\n return_complex=True,\n ).abs()\n # spec = torch.view_as_real(spec)\n # spec = torch.sqrt(spec.pow(2).sum(-1) + (1e-9))\n\n if key_shift != 0:\n size = self.n_fft // 2 + 1\n resize = spec.size(1)\n if resize < size:\n spec = F.pad(spec, (0, 0, 0, size - resize))\n\n spec = spec[:, :size, :] * self.win_size / win_size_new\n\n spec = torch.matmul(self.mel_basis[mel_basis_key], spec)\n\n return spec\n\n def dynamic_range_compression_torch(self,x, C=1, clip_val=1e-5):\n return torch.log(torch.clamp(x, min=clip_val) * C)" } ]
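The GanBaseTask snippet in the context above alternates a discriminator step and a generator step inside Lightning's manual-optimization loop, with an optional aux-only warm-up (aux_step) and per-optimizer gradient-norm clipping. Below is a minimal, framework-free sketch of that update order in plain PyTorch; the helper name gan_step, the modules G and D, and the hinge adversarial loss plus L1 auxiliary loss are illustrative stand-ins, not the repository's mix_loss.

from torch import nn
import torch.nn.functional as F


def gan_step(G, D, opt_g, opt_d, batch, global_step, aux_step=None, clip_norm=None):
    """One alternating update: discriminator first, then generator plus aux loss."""
    # warm-up phase: train the generator on the auxiliary loss only
    aux_only = aux_step is not None and global_step < aux_step
    fake = G(batch['mel'])  # generated waveform, same shape as batch['audio']

    d_loss = None
    if not aux_only:
        # discriminator update on detached generator output (hinge loss as a stand-in)
        d_loss = (F.relu(1.0 - D(batch['audio'])).mean()
                  + F.relu(1.0 + D(fake.detach())).mean())
        opt_d.zero_grad()
        d_loss.backward()
        if clip_norm is not None:
            nn.utils.clip_grad_norm_(D.parameters(), clip_norm)
        opt_d.step()

    # generator update: adversarial term (skipped during warm-up) + auxiliary term
    aux_loss = F.l1_loss(fake, batch['audio'])
    g_loss = aux_loss if aux_only else aux_loss - D(fake).mean()
    opt_g.zero_grad()
    g_loss.backward()
    if clip_norm is not None:
        nn.utils.clip_grad_norm_(G.parameters(), clip_norm)
    opt_g.step()

    return {'d_loss': d_loss, 'g_loss': g_loss}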
import pathlib
import random
import numpy as np
import torch.utils.data
import utils
from matplotlib import pyplot as plt
from torch import nn
from torch.utils.data import Dataset
from models.nsf_HiFigan.models import MultiScaleDiscriminator
from models.nsf_univnet.nsfunivnet import nsfUnivNet
from modules.loss.nsf_univloss import nsf_univloss
from modules.loss.nsf_univloss_msd import nsf_univloss_msd
from modules.univ_D.discriminator import MultiPeriodDiscriminator, MultiResSpecDiscriminator
from training.base_task_gan import GanBaseTask
from utils.wav2mel import PitchAdjustableMelSpectrogram
12,972
def __len__(self): return len(self.data_index) def collater(self, minibatch): samples_per_frame = self.config['hop_size'] if self.infer: crop_mel_frames = 0 else: crop_mel_frames = self.config['crop_mel_frames'] for record in minibatch: # Filter out records that aren't long enough. if len(record['spectrogram']) < crop_mel_frames: del record['spectrogram'] del record['audio'] del record['f0'] del record['uv'] continue start = random.randint(0, record['spectrogram'].shape[0] - 1 - crop_mel_frames) end = start + crop_mel_frames if self.infer: record['spectrogram'] = record['spectrogram'].T record['f0'] = record['f0'] record['uv']=record['uv'] else: record['spectrogram'] = record['spectrogram'][start:end].T record['f0'] = record['f0'][start:end] record['uv']=record['uv'][start:end] start *= samples_per_frame end *= samples_per_frame if self.infer: cty=(len(record['spectrogram'].T) * samples_per_frame) record['audio'] = record['audio'][:cty] record['audio'] = np.pad(record['audio'], ( 0, (len(record['spectrogram'].T) * samples_per_frame) - len(record['audio'])), mode='constant') pass else: # record['spectrogram'] = record['spectrogram'][start:end].T record['audio'] = record['audio'][start:end] record['audio'] = np.pad(record['audio'], (0, (end - start) - len(record['audio'])), mode='constant') if self.volume_aug: for record in minibatch: if random.random() < self.volume_aug_prob: audio = record['audio'] audio_mel = record['spectrogram'] max_amp = float(np.max(np.abs(audio))) + 1e-5 max_shift = min(3, np.log(1 / max_amp)) log_mel_shift = random.uniform(-3, max_shift) # audio *= (10 ** log_mel_shift) audio *= np.exp(log_mel_shift) audio_mel += log_mel_shift audio_mel = torch.clamp(torch.from_numpy(audio_mel), min=np.log(1e-5)).numpy() record['audio'] = audio record['spectrogram'] = audio_mel audio = np.stack([record['audio'] for record in minibatch if 'audio' in record]) spectrogram = np.stack([record['spectrogram'] for record in minibatch if 'spectrogram' in record]) f0 = np.stack([record['f0'] for record in minibatch if 'f0' in record]) uv=np.stack([record['uv'] for record in minibatch if 'uv' in record]) return { 'audio': torch.from_numpy(audio).unsqueeze(1), 'mel': torch.from_numpy(spectrogram), 'f0': torch.from_numpy(f0),'uv':torch.from_numpy(uv) } class stftlog: def __init__(self, n_fft=2048, win_length=2048, hop_length=512, center=False,): self.hop_length=hop_length self.win_size=win_length self.n_fft = n_fft self.win_size = win_length self.center = center self.hann_window = {} def exc(self,y): hann_window_key = f"{y.device}" if hann_window_key not in self.hann_window: self.hann_window[hann_window_key] = torch.hann_window( self.win_size, device=y.device ) y = torch.nn.functional.pad( y.unsqueeze(1), ( int((self.win_size - self.hop_length) // 2), int((self.win_size - self.hop_length+1) // 2), ), mode="reflect", ) y = y.squeeze(1) spec = torch.stft( y, self.n_fft, hop_length=self.hop_length, win_length=self.win_size, window=self.hann_window[hann_window_key], center=self.center, pad_mode="reflect", normalized=False, onesided=True, return_complex=True, ).abs() return spec
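The collater in the cropped_code above takes a random crop measured in mel frames, converts it to the matching audio window via hop_size, and optionally applies a volume augmentation in the log-mel domain. A small self-contained sketch of those two steps on synthetic arrays; hop_size, crop_mel_frames and the array shapes here are illustrative values, not the repository's config.

import numpy as np

hop_size, crop_mel_frames = 512, 32  # illustrative config values

mel = np.random.randn(100, 128).astype(np.float32)                       # [frames, bins], log-mel
audio = np.random.uniform(-0.1, 0.1, 100 * hop_size).astype(np.float32)  # matching waveform

# 1) random crop, aligned between mel frames and audio samples
start = np.random.randint(0, mel.shape[0] - crop_mel_frames)
end = start + crop_mel_frames
mel_crop = mel[start:end].T                           # [bins, frames]
audio_crop = audio[start * hop_size:end * hop_size]   # [frames * hop_size]

# 2) volume augmentation: scale audio by exp(shift), add shift to the log-mel
max_amp = float(np.abs(audio_crop).max()) + 1e-5
shift = np.random.uniform(-3, min(3, np.log(1 / max_amp)))
audio_aug = audio_crop * np.exp(shift)
mel_aug = np.clip(mel_crop + shift, a_min=np.log(1e-5), a_max=None)

assert audio_crop.shape[0] == crop_mel_frames * hop_size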
# import logging # import os # import sys # from typing import Dict # # import lightning.pytorch as pl # import matplotlib # from lightning.pytorch.utilities.rank_zero import rank_zero_debug, rank_zero_info, rank_zero_only # from torchmetrics import Metric, MeanMetric # from models.ddsp_univnet.ddspunivnet import ddspUnivNet # from models.univnet.univnet import UnivNet # from models.lvc_ddspgan.lvc_ddspgan import DDSPgan # from models.nsf_HiFigan.models import Generator, AttrDict, MultiScaleDiscriminator, MultiPeriodDiscriminator # from modules.loss.ddsp_univloss import ddsp_univloss def spec_to_figure(spec, vmin=None, vmax=None): if isinstance(spec, torch.Tensor): spec = spec.cpu().numpy() fig = plt.figure(figsize=(12, 9),dpi=100) plt.pcolor(spec.T, vmin=vmin, vmax=vmax) plt.tight_layout() return fig class ddsp_univ_dataset(Dataset): def __init__(self, config: dict, data_dir, infer=False): super().__init__() self.config = config self.data_dir = data_dir if isinstance(data_dir, pathlib.Path) else pathlib.Path(data_dir) with open(self.data_dir, 'r', encoding='utf8') as f: fills = f.read().strip().split('\n') self.data_index = fills self.infer = infer self.volume_aug = self.config['volume_aug'] self.volume_aug_prob = self.config['volume_aug_prob'] if not infer else 0 def __getitem__(self, index): data_path = self.data_index[index] data = np.load(data_path) return {'f0':data['f0'],'spectrogram':data['mel'],'audio':data['audio'],'uv':data['uv']} def __len__(self): return len(self.data_index) def collater(self, minibatch): samples_per_frame = self.config['hop_size'] if self.infer: crop_mel_frames = 0 else: crop_mel_frames = self.config['crop_mel_frames'] for record in minibatch: # Filter out records that aren't long enough. if len(record['spectrogram']) < crop_mel_frames: del record['spectrogram'] del record['audio'] del record['f0'] del record['uv'] continue start = random.randint(0, record['spectrogram'].shape[0] - 1 - crop_mel_frames) end = start + crop_mel_frames if self.infer: record['spectrogram'] = record['spectrogram'].T record['f0'] = record['f0'] record['uv']=record['uv'] else: record['spectrogram'] = record['spectrogram'][start:end].T record['f0'] = record['f0'][start:end] record['uv']=record['uv'][start:end] start *= samples_per_frame end *= samples_per_frame if self.infer: cty=(len(record['spectrogram'].T) * samples_per_frame) record['audio'] = record['audio'][:cty] record['audio'] = np.pad(record['audio'], ( 0, (len(record['spectrogram'].T) * samples_per_frame) - len(record['audio'])), mode='constant') pass else: # record['spectrogram'] = record['spectrogram'][start:end].T record['audio'] = record['audio'][start:end] record['audio'] = np.pad(record['audio'], (0, (end - start) - len(record['audio'])), mode='constant') if self.volume_aug: for record in minibatch: if random.random() < self.volume_aug_prob: audio = record['audio'] audio_mel = record['spectrogram'] max_amp = float(np.max(np.abs(audio))) + 1e-5 max_shift = min(3, np.log(1 / max_amp)) log_mel_shift = random.uniform(-3, max_shift) # audio *= (10 ** log_mel_shift) audio *= np.exp(log_mel_shift) audio_mel += log_mel_shift audio_mel = torch.clamp(torch.from_numpy(audio_mel), min=np.log(1e-5)).numpy() record['audio'] = audio record['spectrogram'] = audio_mel audio = np.stack([record['audio'] for record in minibatch if 'audio' in record]) spectrogram = np.stack([record['spectrogram'] for record in minibatch if 'spectrogram' in record]) f0 = np.stack([record['f0'] for record in minibatch if 'f0' in record]) 
uv=np.stack([record['uv'] for record in minibatch if 'uv' in record]) return { 'audio': torch.from_numpy(audio).unsqueeze(1), 'mel': torch.from_numpy(spectrogram), 'f0': torch.from_numpy(f0),'uv':torch.from_numpy(uv) } class stftlog: def __init__(self, n_fft=2048, win_length=2048, hop_length=512, center=False,): self.hop_length=hop_length self.win_size=win_length self.n_fft = n_fft self.win_size = win_length self.center = center self.hann_window = {} def exc(self,y): hann_window_key = f"{y.device}" if hann_window_key not in self.hann_window: self.hann_window[hann_window_key] = torch.hann_window( self.win_size, device=y.device ) y = torch.nn.functional.pad( y.unsqueeze(1), ( int((self.win_size - self.hop_length) // 2), int((self.win_size - self.hop_length+1) // 2), ), mode="reflect", ) y = y.squeeze(1) spec = torch.stft( y, self.n_fft, hop_length=self.hop_length, win_length=self.win_size, window=self.hann_window[hann_window_key], center=self.center, pad_mode="reflect", normalized=False, onesided=True, return_complex=True, ).abs() return spec
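The stftlog helper above reflect-pads the waveform by roughly (win_length - hop_length) / 2 on each side and then calls torch.stft with center=False, so the number of output frames equals samples / hop_length and lines up with the mel frames used elsewhere. A short standalone check of that framing; the sizes below are illustrative.

import torch

n_fft = win = 2048
hop = 512
wav = torch.randn(1, hop * 64)  # [batch, samples]: 64 hops of audio

# reflect-pad so that the frame count equals samples / hop (as in stftlog.exc)
pad_l = (win - hop) // 2
pad_r = (win - hop + 1) // 2
y = torch.nn.functional.pad(wav.unsqueeze(1), (pad_l, pad_r), mode="reflect").squeeze(1)

mag = torch.stft(y, n_fft, hop_length=hop, win_length=win,
                 window=torch.hann_window(win), center=False,
                 return_complex=True).abs()
print(mag.shape)  # torch.Size([1, 1025, 64]): [batch, n_fft // 2 + 1, frames]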
class nsf_univnet_task(GanBaseTask):
6
2023-10-17 13:45:09+00:00
16k
Jacob-Zhou/gecdi
gec/parser.py
[ { "identifier": "Dataset", "path": "gec/data.py", "snippet": "class Dataset(torch.utils.data.Dataset):\n r\"\"\"\n Dataset that is compatible with :class:`torch.utils.data.Dataset`, serving as a wrapper for manipulating all data fields\n with the operating behaviours defined in :class:`~supar.utils.transform.Transform`.\n The data fields of all the instantiated sentences can be accessed as an attribute of the dataset.\n\n Args:\n transform (Transform):\n An instance of :class:`~supar.utils.transform.Transform` or its derivations.\n The instance holds a series of loading and processing behaviours with regard to the specific data format.\n data (Union[str, Iterable]):\n A filename or a list of instances that will be passed into :meth:`transform.load`.\n cache (bool):\n If ``True``, tries to use the previously cached binarized data for fast loading.\n In this way, sentences are loaded on-the-fly according to the meta data.\n If ``False``, all sentences will be directly loaded into the memory.\n Default: ``False``.\n binarize (bool):\n If ``True``, binarizes the dataset once building it. Only works if ``cache=True``. Default: ``False``.\n bin (str):\n Path for saving binarized files, required if ``cache=True``. Default: ``None``.\n max_len (int):\n Sentences exceeding the length will be discarded. Default: ``None``.\n kwargs (Dict):\n Together with `data`, kwargs will be passed into :meth:`transform.load` to control the loading behaviour.\n\n Attributes:\n transform (Transform):\n An instance of :class:`~supar.utils.transform.Transform`.\n sentences (List[Sentence]):\n A list of sentences loaded from the data.\n Each sentence includes fields obeying the data format defined in ``transform``.\n If ``cache=True``, each is a pointer to the sentence stored in the cache file.\n \"\"\"\n\n def __init__(\n self,\n transform: Transform,\n data: Union[str, Iterable],\n cache: bool = False,\n binarize: bool = False,\n bin: str = None,\n max_len: int = None,\n **kwargs\n ) -> Dataset:\n super(Dataset, self).__init__()\n\n self.transform = transform\n self.data = data\n self.cache = cache\n self.binarize = binarize\n self.bin = bin\n self.max_len = max_len or INF\n self.kwargs = kwargs\n\n if cache:\n if not isinstance(data, str) or not os.path.exists(data):\n raise FileNotFoundError(\"Only files are allowed for binarization, but not found\")\n if self.bin is None:\n self.fbin = data + '.pt'\n else:\n os.makedirs(self.bin, exist_ok=True)\n self.fbin = os.path.join(self.bin, os.path.split(data)[1]) + '.pt'\n if not self.binarize and os.path.exists(self.fbin):\n try:\n self.sentences = debinarize(self.fbin, meta=True)['sentences']\n except Exception:\n raise RuntimeError(f\"Error found while debinarizing {self.fbin}, which may have been corrupted. 
\"\n \"Try re-binarizing it first\")\n else:\n self.sentences = list(transform.load(data, **kwargs))\n\n def __repr__(self):\n s = f\"{self.__class__.__name__}(\"\n s += f\"n_sentences={len(self.sentences)}\"\n if hasattr(self, 'loader'):\n s += f\", n_batches={len(self.loader)}\"\n if hasattr(self, 'buckets'):\n s += f\", n_buckets={len(self.buckets)}\"\n if self.shuffle:\n s += f\", seed={self.seed}\"\n if self.cache:\n s += f\", cache={self.cache}\"\n if self.binarize:\n s += f\", binarize={self.binarize}\"\n if self.max_len < INF:\n s += f\", max_len={self.max_len}\"\n s += \")\"\n return s\n\n def __len__(self):\n return len(self.sentences)\n\n def __getitem__(self, index):\n return debinarize(self.fbin, self.sentences[index]) if self.cache else self.sentences[index]\n\n def __getattr__(self, name):\n if name not in {f.name for f in self.transform.flattened_fields}:\n raise AttributeError\n if self.cache:\n if os.path.exists(self.fbin) and not self.binarize:\n sentences = self\n else:\n sentences = self.transform.load(self.data, **self.kwargs)\n return (getattr(sentence, name) for sentence in sentences)\n return [getattr(sentence, name) for sentence in self.sentences]\n\n def __getstate__(self):\n return self.__dict__\n\n def __setstate__(self, state):\n self.__dict__.update(state)\n\n @lazy_property\n def sizes(self):\n if not self.cache:\n return [s.size for s in self.sentences]\n return debinarize(self.fbin, 'sizes')\n\n def build(\n self,\n batch_size: int,\n n_buckets: int = 1,\n shuffle: bool = False,\n distributed: bool = False,\n n_workers: int = 0,\n pin_memory: bool = True,\n chunk_size: int = 1000,\n seed: int = 1,\n ) -> Dataset:\n # numericalize all fields\n if not self.cache:\n self.sentences = [i for i in self.transform(self.sentences) if len(i) < self.max_len]\n else:\n # if not forced to do binarization and the binarized file already exists, directly load the meta file\n if os.path.exists(self.fbin) and not self.binarize:\n self.sentences = debinarize(self.fbin, meta=True)['sentences']\n else:\n @contextmanager\n def cache(sentences):\n ftemp = tempfile.mkdtemp()\n fs = os.path.join(ftemp, 'sentences')\n fb = os.path.join(ftemp, os.path.basename(self.fbin))\n global global_transform\n global_transform = self.transform\n sentences = binarize({'sentences': progress_bar(sentences)}, fs)[1]['sentences']\n try:\n yield ((sentences[s:s+chunk_size], fs, f\"{fb}.{i}\", self.max_len)\n for i, s in enumerate(range(0, len(sentences), chunk_size)))\n finally:\n del global_transform\n shutil.rmtree(ftemp)\n\n def numericalize(sentences, fs, fb, max_len):\n sentences = global_transform((debinarize(fs, sentence) for sentence in sentences))\n sentences = [i for i in sentences if len(i) < max_len]\n return binarize({'sentences': sentences, 'sizes': [sentence.size for sentence in sentences]}, fb)[0]\n\n logger.info(f\"Seeking to cache the data to {self.fbin} first\")\n # numericalize the fields of each sentence\n if is_master():\n with cache(self.transform.load(self.data, **self.kwargs)) as chunks, mp.Pool(32) as pool:\n results = [pool.apply_async(numericalize, chunk) for chunk in chunks]\n self.sentences = binarize((r.get() for r in results), self.fbin, merge=True)[1]['sentences']\n if is_dist():\n dist.barrier()\n if not is_master():\n self.sentences = debinarize(self.fbin, meta=True)['sentences']\n # NOTE: the final bucket count is roughly equal to n_buckets\n self.buckets = dict(zip(*kmeans(self.sizes, n_buckets)))\n self.loader = DataLoader(transform=self.transform,\n 
dataset=self,\n batch_sampler=Sampler(self.buckets, batch_size, shuffle, distributed, seed=seed),\n num_workers=n_workers,\n collate_fn=collate_fn,\n pin_memory=pin_memory)\n self.seed = seed\n self.shuffle = shuffle\n return self" }, { "identifier": "map_token_ids", "path": "gec/fn.py", "snippet": "def map_token_ids(vocab_0, vocab_1, equal_labels=None):\n \"\"\"\n Map token ids from vocab_0 to vocab_1\n\n Args:\n vocab_0 (dict): vocab_0\n vocab_1 (dict): vocab_1\n equal_labels (dict): equal_labels\n \"\"\"\n if equal_labels is None:\n equal_labels = {}\n return [(i, vocab_1[equal_labels.get(k, k)]) for k, i in vocab_0.items()\n if k in vocab_1]" }, { "identifier": "PerplexityMetric", "path": "gec/metric.py", "snippet": "class PerplexityMetric(Metric):\n def __init__(self,\n loss: Optional[float] = None,\n preds: Optional[torch.Tensor] = None,\n golds: Optional[torch.Tensor] = None,\n mask: Optional[torch.BoolTensor] = None,\n reverse: bool = True,\n eps: float = 1e-12) -> PerplexityMetric:\n super().__init__(reverse=reverse, eps=eps)\n\n self.n_tokens = 0.\n\n self.tp = 0.0\n self.pred = 0.0\n self.gold = 0.0\n\n self.total_loss = 0.\n\n if loss is not None:\n self(loss, preds, golds, mask)\n\n def __repr__(self):\n s = f\"loss: {self.loss:.4f} PPL: {self.ppl:.4f}\"\n if self.tp > 0:\n s += f\" - TGT: P: {self.p:6.2%} R: {self.r:6.2%} F0.5: {self.f:6.2%}\"\n return s\n\n def __call__(self, loss: float, preds: Tuple[List, torch.Tensor],\n golds: Tuple[List, torch.Tensor],\n mask: torch.BoolTensor) -> PerplexityMetric:\n n_tokens = mask.sum().item()\n self.n += len(mask)\n self.count += 1\n self.n_tokens += n_tokens\n self.total_loss += float(loss) * n_tokens\n\n if preds is not None:\n with tempfile.TemporaryDirectory() as t:\n fsrc, fpred, fgold = os.path.join(t, 'src'), os.path.join(\n t, 'pred'), os.path.join(t, 'gold')\n pred_m2, gold_m2 = os.path.join(t, 'pred.m2'), os.path.join(\n t, 'gold.m2')\n with open(fsrc, 'w') as fs, open(fpred, 'w') as f:\n for s, i in preds:\n fs.write(s + '\\n')\n f.write(i + '\\n')\n with open(fgold, 'w') as f:\n for _, i in golds:\n f.write(i + '\\n')\n subprocess.check_output([\n 'errant_parallel', '-orig', f'{fsrc}', '-cor', f'{fpred}',\n '-out', f'{pred_m2}'\n ])\n subprocess.check_output([\n 'errant_parallel', '-orig', f'{fsrc}', '-cor', f'{fgold}',\n '-out', f'{gold_m2}'\n ])\n out = subprocess.check_output(\n [\n 'errant_compare', '-hyp', f'{pred_m2}', '-ref',\n f'{gold_m2}'\n ],\n stderr=subprocess.STDOUT).decode()\n tp, fp, fn = (int(i) for i in out.split('\\n')[3].split()[:3])\n self.tp += tp\n self.pred += tp + fp\n self.gold += tp + fn\n return self\n\n def __add__(self, other: PerplexityMetric) -> PerplexityMetric:\n metric = PerplexityMetric(eps=self.eps)\n metric.n = self.n + other.n\n metric.count = self.count + other.count\n metric.n_tokens = self.n_tokens + other.n_tokens\n metric.total_loss = self.total_loss + other.total_loss\n\n metric.tp = self.tp + other.tp\n metric.pred = self.pred + other.pred\n metric.gold = self.gold + other.gold\n metric.reverse = self.reverse or other.reverse\n return metric\n\n @property\n def score(self):\n return self.f if self.f > 0 else self.ppl\n\n @property\n def loss(self):\n return self.total_loss / self.n_tokens\n\n @property\n def ppl(self):\n return math.pow(2, (self.loss / math.log(2)))\n\n @property\n def p(self):\n return self.tp / (self.pred + self.eps)\n\n @property\n def r(self):\n return self.tp / (self.gold + self.eps)\n\n @property\n def f(self):\n return (1 + 0.5**2) * self.p * self.r / 
(0.5**2 * self.p + self.r +\n self.eps)" }, { "identifier": "SpanMetric", "path": "gec/metric.py", "snippet": "class SpanMetric(Metric):\n def __init__(self,\n loss: Optional[float] = None,\n preds: Optional[List[List[Tuple]]] = None,\n golds: Optional[List[List[Tuple]]] = None,\n reverse: bool = False,\n beta: Optional[float] = 1.,\n eps: float = 1e-12) -> SpanMetric:\n super().__init__(reverse=reverse, eps=eps)\n\n self.n_ucm = 0.0\n self.n_lcm = 0.0\n self.n_tr = 0.0\n self.n_fr = 0.0\n self.n_e = 0.0\n self.n_c = 0.0\n self.utp = 0.0\n self.ltp = 0.0\n self.pred = 0.0\n self.gold = 0.0\n self.beta = beta\n\n if loss is not None:\n self(loss, preds, golds)\n\n def __repr__(self):\n s = f\"ErrorSents: {self.n_e:6.0f} CorrectSents: {self.n_c:6.0f} TR: {self.tr:7.2%} FR: {self.fr:7.2%} \"\n # s += f\"GoldSpans: {self.gold:6.0f} PredSpans: {self.pred:6.0f} \"\n s += f\"UP: {self.up:7.2%} UR: {self.ur:7.2%} UF{'' if self.beta == 1.0 else self.beta}: {self.uf:7.2%} \"\n s += f\"LP: {self.lp:7.2%} LR: {self.lr:7.2%} LF{'' if self.beta == 1.0 else self.beta}: {self.lf:7.2%}\"\n return s\n\n def __call__(self, loss: float, preds: List[List[Tuple]],\n golds: List[List[Tuple]]) -> SpanMetric:\n self.n += len(preds)\n self.count += 1\n self.total_loss += float(loss)\n for pred, gold in zip(preds, golds):\n upred, ugold = Counter([tuple(span[:-1])\n for span in pred]), Counter(\n [tuple(span[:-1]) for span in gold])\n lpred, lgold = Counter([tuple(span) for span in pred\n ]), Counter([tuple(span) for span in gold])\n utp, ltp = list((upred & ugold).elements()), list(\n (lpred & lgold).elements())\n self.n_ucm += len(utp) == len(pred) == len(gold)\n self.n_lcm += len(ltp) == len(pred) == len(gold)\n self.n_tr += ((len(gold) > 0) and (len(pred) > 0))\n self.n_fr += ((len(gold) == 0) and (len(pred) > 0))\n self.n_e += (len(gold) > 0)\n self.n_c += (len(gold) == 0)\n self.utp += len(utp)\n self.ltp += len(ltp)\n self.pred += len(pred)\n self.gold += len(gold)\n return self\n\n def __add__(self, other: SpanMetric) -> SpanMetric:\n metric = SpanMetric(eps=self.eps, beta=self.beta)\n metric.n = self.n + other.n\n metric.count = self.count + other.count\n metric.total_loss = self.total_loss + other.total_loss\n metric.n_ucm = self.n_ucm + other.n_ucm\n metric.n_lcm = self.n_lcm + other.n_lcm\n metric.n_tr = self.n_tr + other.n_tr\n metric.n_fr = self.n_fr + other.n_fr\n metric.n_e = self.n_e + other.n_e\n metric.n_c = self.n_c + other.n_c\n metric.utp = self.utp + other.utp\n metric.ltp = self.ltp + other.ltp\n metric.pred = self.pred + other.pred\n metric.gold = self.gold + other.gold\n metric.reverse = self.reverse or other.reverse\n return metric\n\n @property\n def score(self):\n return self.lf\n\n @property\n def ucm(self):\n return self.n_ucm / (self.n + self.eps)\n\n @property\n def lcm(self):\n return self.n_lcm / (self.n + self.eps)\n\n @property\n def tr(self):\n return self.n_tr / (self.n_e + self.eps)\n\n @property\n def fr(self):\n return self.n_fr / (self.n_c + self.eps)\n\n @property\n def up(self):\n return self.utp / (self.pred + self.eps)\n\n @property\n def ur(self):\n return self.utp / (self.gold + self.eps)\n\n @property\n def uf(self):\n return (1 + self.beta**2) * self.utp / (self.pred +\n (self.beta**2) * self.gold +\n self.eps)\n\n @property\n def lp(self):\n return self.ltp / (self.pred + self.eps)\n\n @property\n def lr(self):\n return self.ltp / (self.gold + self.eps)\n\n @property\n def lf(self):\n return (1 + self.beta**2) * self.ltp / (self.pred +\n (self.beta**2) * self.gold 
+\n self.eps)" }, { "identifier": "Seq2SeqDetectModel", "path": "gec/model.py", "snippet": "class Seq2SeqDetectModel(Seq2SeqModel):\n r\"\"\"\n The implementation of Semantic Role Labeling Parser using span-constrained CRF.\n\n Args:\n n_words (int):\n The size of the word vocabulary.\n n_tags (int):\n The number of POS tags, required if POS tag embeddings are used. Default: ``None``.\n n_chars (int):\n The number of characters, required if character-level representations are used. Default: ``None``.\n n_lemmas (int):\n The number of lemmas, required if lemma embeddings are used. Default: ``None``.\n encoder (str):\n Encoder to use.\n ``'lstm'``: BiLSTM encoder.\n ``'bert'``: BERT-like pretrained language model (for finetuning), e.g., ``'bert-base-cased'``.\n Default: ``'lstm'``.\n n_embed (int):\n The size of word embeddings. Default: 100.\n n_pretrained (int):\n The size of pretrained word embeddings. Default: 125.\n n_feat_embed (int):\n The size of feature representations. Default: 100.\n n_char_embed (int):\n The size of character embeddings serving as inputs of CharLSTM, required if using CharLSTM. Default: 50.\n n_char_hidden (int):\n The size of y states of CharLSTM, required if using CharLSTM. Default: 100.\n char_pad_index (int):\n The index of the padding token in the character vocabulary, required if using CharLSTM. Default: 0.\n elmo (str):\n Name of the pretrained ELMo registered in `ELMoEmbedding.OPTION`. Default: ``'original_5b'``.\n elmo_bos_eos (tuple[bool]):\n A tuple of two boolean values indicating whether to keep start/end boundaries of elmo outputs.\n Default: ``(True, False)``.\n bert (str):\n Specifies which kind of language model to use, e.g., ``'bert-base-cased'``.\n This is required if ``encoder='bert'`` or using BERT features. The full list can be found in `transformers`_.\n Default: ``None``.\n n_bert_layers (int):\n Specifies how many last layers to use, required if ``encoder='bert'`` or using BERT features.\n The final outputs would be weighted sum of the y states of these layers.\n Default: 4.\n mix_dropout (float):\n The dropout ratio of BERT layers, required if ``encoder='bert'`` or using BERT features. Default: .0.\n bert_pooling (str):\n Pooling way to get token embeddings.\n ``first``: take the first subtoken. ``last``: take the last subtoken. ``mean``: take a mean over all.\n Default: ``mean``.\n bert_pad_index (int):\n The index of the padding token in BERT vocabulary, required if ``encoder='bert'`` or using BERT features.\n Default: 0.\n freeze (bool):\n If ``True``, freezes BERT parameters, required if using BERT features. Default: ``True``.\n embed_dropout (float):\n The dropout ratio of input embeddings. Default: .2.\n n_encoder_hidden (int):\n The size of LSTM y states. Default: 600.\n n_encoder_layers (int):\n The number of LSTM layers. Default: 3.\n encoder_dropout (float):\n The dropout ratio of encoder layer. Default: .33.\n mlp_dropout (float):\n The dropout ratio of unary edge factor MLP layers. Default: .33.\n pad_index (int):\n The index of the padding token in the word vocabulary. Default: 0.\n unk_index (int):\n The index of the unknown token in the word vocabulary. Default: 1.\n\n .. 
_transformers:\n https://github.com/huggingface/transformers\n \"\"\"\n\n def __init__(self,\n n_words,\n n_labels,\n n_tags=None,\n n_chars=None,\n n_lemmas=None,\n encoder='lstm',\n n_embed=100,\n n_pretrained=100,\n n_feat_embed=100,\n n_char_embed=50,\n n_char_hidden=100,\n char_pad_index=0,\n char_dropout=0,\n elmo='original_5b',\n elmo_bos_eos=(True, False),\n bert=None,\n n_bert_layers=4,\n mix_dropout=.0,\n bert_pooling='mean',\n bert_pad_index=0,\n freeze=True,\n embed_dropout=.33,\n n_encoder_hidden=1024,\n n_encoder_layers=3,\n encoder_dropout=.1,\n pad_index=0,\n unk_index=1,\n **kwargs):\n super().__init__(**Config().update(locals()))\n\n del self.classifier\n self.error_classifier = nn.Linear(self.model.config.d_model,\n self.args.n_labels)\n self.criterion = CrossEntropyLoss(\n label_smoothing=self.args.label_smoothing)\n\n def loss(self, x, tgt, src_error, tgt_error, src_mask, tgt_mask):\n src_mask = src_mask & True\n tgt_mask = tgt_mask & True\n if self.args.encoder == 'transformer':\n tgt_mask = tgt_mask[:, 1:]\n shifted, tgt, = tgt[:, :-1], tgt[:, 1:]\n batch_size, seq_len = tgt.shape\n attn_mask = tgt.new_ones(seq_len, seq_len,\n dtype=torch.bool).tril_()\n y = self.decoder(self.embed(shifted), x, tgt_mask, src_mask,\n attn_mask)\n else:\n shifted = torch.full_like(tgt, self.args.eos_index)\n shifted[:, 1:] = tgt[:, :-1]\n y = self.decoder(input_ids=shifted,\n attention_mask=tgt_mask,\n encoder_hidden_states=x,\n encoder_attention_mask=src_mask)[0]\n tgt_mask[:, 0] = 0\n\n n_shift = 1 if self.args.encoder == 'transformer' else 2\n y, tgt_mask = y[:, n_shift:], tgt_mask[:, n_shift:]\n\n y = self.decoder_dropout(y)\n # s_src_error = self.error_classifier(x[:, 1:-1])\n s_tgt_error = self.error_classifier(y)\n\n # src_mask = src_mask[:, 2:]\n\n if \"partial\" in self.args.error_schema:\n # src_mask = src_mask & (src_error != self.args.nul_index)\n tgt_mask = tgt_mask & (tgt_error != self.args.nul_index)\n # src_error_loss = self.criterion(s_src_error[src_mask], src_error[src_mask])\n tgt_error_loss = self.criterion(s_tgt_error[tgt_mask],\n tgt_error[tgt_mask])\n # return src_error_loss + tgt_error_loss\n return tgt_error_loss\n\n def decode(self, x, tgt, src_mask, tgt_mask):\n src_mask = src_mask & True\n tgt_mask = tgt_mask & True\n if self.args.encoder == 'transformer':\n tgt_mask = tgt_mask[:, 1:]\n shifted, tgt, = tgt[:, :-1], tgt[:, 1:]\n batch_size, seq_len = tgt.shape\n attn_mask = tgt.new_ones(seq_len, seq_len,\n dtype=torch.bool).tril_()\n y = self.decoder(self.embed(shifted), x, tgt_mask, src_mask,\n attn_mask)\n else:\n shifted = torch.full_like(tgt, self.args.eos_index)\n shifted[:, 1:] = tgt[:, :-1]\n y = self.decoder(input_ids=shifted,\n attention_mask=tgt_mask,\n encoder_hidden_states=x,\n encoder_attention_mask=src_mask)[0]\n tgt_mask[:, 0] = 0\n\n n_shift = 1 if self.args.encoder == 'transformer' else 2\n y, mask = y[:, n_shift:], tgt_mask[:, n_shift:]\n\n s_errors = self.error_classifier(y)\n if \"partial\" in self.args.error_schema:\n s_errors[...,\n self.args.nul_index] = torch.finfo(s_errors.dtype).min\n errors = s_errors.argmax(-1)\n errors[~mask] = -1\n\n return errors" }, { "identifier": "Seq2SeqModel", "path": "gec/model.py", "snippet": "class Seq2SeqModel(Model):\n r\"\"\"\n The implementation of Semantic Role Labeling Parser using span-constrained CRF.\n\n Args:\n n_words (int):\n The size of the word vocabulary.\n n_tags (int):\n The number of POS tags, required if POS tag embeddings are used. 
Default: ``None``.\n n_chars (int):\n The number of characters, required if character-level representations are used. Default: ``None``.\n n_lemmas (int):\n The number of lemmas, required if lemma embeddings are used. Default: ``None``.\n encoder (str):\n Encoder to use.\n ``'lstm'``: BiLSTM encoder.\n ``'bert'``: BERT-like pretrained language model (for finetuning), e.g., ``'bert-base-cased'``.\n Default: ``'lstm'``.\n n_embed (int):\n The size of word embeddings. Default: 100.\n n_pretrained (int):\n The size of pretrained word embeddings. Default: 125.\n n_feat_embed (int):\n The size of feature representations. Default: 100.\n n_char_embed (int):\n The size of character embeddings serving as inputs of CharLSTM, required if using CharLSTM. Default: 50.\n n_char_hidden (int):\n The size of y states of CharLSTM, required if using CharLSTM. Default: 100.\n char_pad_index (int):\n The index of the padding token in the character vocabulary, required if using CharLSTM. Default: 0.\n elmo (str):\n Name of the pretrained ELMo registered in `ELMoEmbedding.OPTION`. Default: ``'original_5b'``.\n elmo_bos_eos (tuple[bool]):\n A tuple of two boolean values indicating whether to keep start/end boundaries of elmo outputs.\n Default: ``(True, False)``.\n bert (str):\n Specifies which kind of language model to use, e.g., ``'bert-base-cased'``.\n This is required if ``encoder='bert'`` or using BERT features. The full list can be found in `transformers`_.\n Default: ``None``.\n n_bert_layers (int):\n Specifies how many last layers to use, required if ``encoder='bert'`` or using BERT features.\n The final outputs would be weighted sum of the y states of these layers.\n Default: 4.\n mix_dropout (float):\n The dropout ratio of BERT layers, required if ``encoder='bert'`` or using BERT features. Default: .0.\n bert_pooling (str):\n Pooling way to get token embeddings.\n ``first``: take the first subtoken. ``last``: take the last subtoken. ``mean``: take a mean over all.\n Default: ``mean``.\n bert_pad_index (int):\n The index of the padding token in BERT vocabulary, required if ``encoder='bert'`` or using BERT features.\n Default: 0.\n freeze (bool):\n If ``True``, freezes BERT parameters, required if using BERT features. Default: ``True``.\n embed_dropout (float):\n The dropout ratio of input embeddings. Default: .2.\n n_encoder_hidden (int):\n The size of LSTM y states. Default: 600.\n n_encoder_layers (int):\n The number of LSTM layers. Default: 3.\n encoder_dropout (float):\n The dropout ratio of encoder layer. Default: .33.\n mlp_dropout (float):\n The dropout ratio of unary edge factor MLP layers. Default: .33.\n pad_index (int):\n The index of the padding token in the word vocabulary. Default: 0.\n unk_index (int):\n The index of the unknown token in the word vocabulary. Default: 1.\n\n .. 
_transformers:\n https://github.com/huggingface/transformers\n \"\"\"\n\n def __init__(self,\n n_words,\n n_tags=None,\n n_chars=None,\n n_lemmas=None,\n encoder='lstm',\n n_embed=100,\n n_pretrained=100,\n n_feat_embed=100,\n n_char_embed=50,\n n_char_hidden=100,\n char_pad_index=0,\n char_dropout=0,\n elmo='original_5b',\n elmo_bos_eos=(True, False),\n bert=None,\n n_bert_layers=4,\n mix_dropout=.0,\n bert_pooling='mean',\n bert_pad_index=0,\n freeze=True,\n embed_dropout=.33,\n n_encoder_hidden=512,\n n_encoder_layers=3,\n encoder_dropout=.1,\n pad_index=0,\n unk_index=1,\n **kwargs):\n super().__init__(**Config().update(locals()))\n\n if self.args.encoder == 'transformer':\n self.token_dropout = TokenDropout(self.args.token_dropout)\n self.decoder = TransformerDecoder(\n layer=TransformerDecoderLayer(\n n_heads=self.args.n_decoder_heads,\n n_model=self.args.n_decoder_hidden,\n n_inner=self.args.n_decoder_inner,\n dropout=self.args.decoder_dropout),\n n_layers=self.args.n_decoder_layers)\n\n else:\n from transformers import AutoModel\n self.model = AutoModel.from_pretrained(self.args.bart,\n dropout=self.args.dropout)\n self.encoder, self.decoder = self.model.encoder, self.model.decoder\n self.decoder_dropout = nn.Dropout(self.args.decoder_dropout)\n self.classifier = nn.Linear(self.args.n_encoder_hidden,\n self.args.n_words)\n self.classifier.weight = (self.word_embed.embed\n if self.args.encoder == 'transformer' else\n self.model.shared).weight\n self.criterion = CrossEntropyLoss(\n label_smoothing=self.args.label_smoothing)\n\n def forward(self, words):\n r\"\"\"\n Args:\n words (~torch.LongTensor): ``[batch_size, seq_len]``.\n Word indices.\n\n Returns:\n ~torch.Tensor:\n Representations for the src sentences of the shape ``[batch_size, seq_len, n_model]``.\n \"\"\"\n # we need to do token dropout, so the TranformerWordEmbedding layer is not invoked here\n if self.args.encoder == 'transformer':\n embed = self.token_dropout(self.word_embed.embed(words))\n embed = embed * self.word_embed.embed_scale + self.word_embed.pos_embed(\n embed)\n embed = self.embed_dropout(embed)\n return self.encoder(embed, words.ne(self.args.pad_index))\n else:\n return self.encoder(input_ids=words,\n attention_mask=words.ne(\n self.args.pad_index))[0]\n\n def loss(self, x, tgt, src_mask, tgt_mask):\n if self.args.encoder == 'transformer':\n tgt_mask = tgt_mask[:, 1:]\n shifted, tgt, = tgt[:, :-1], tgt[:, 1:]\n batch_size, seq_len = tgt.shape\n attn_mask = tgt.new_ones(seq_len, seq_len,\n dtype=torch.bool).tril_()\n y = self.decoder(self.embed(shifted), x, tgt_mask, src_mask,\n attn_mask)\n else:\n shifted = torch.full_like(tgt, self.args.eos_index)\n shifted[:, 1:] = tgt[:, :-1]\n y = self.decoder(input_ids=shifted,\n attention_mask=tgt_mask,\n encoder_hidden_states=x,\n encoder_attention_mask=src_mask)[0]\n tgt_mask[:, 0] = 0\n y = self.decoder_dropout(y)\n s_y = self.classifier(y)\n return self.criterion(s_y[tgt_mask], tgt[tgt_mask])\n\n @staticmethod\n def _reorder_cache(past_key_values, beam_idx):\n reordered_past = ()\n for layer_past in past_key_values:\n # cached cross_attention states don't have to be reordered -> they are always the same\n reordered_past += (tuple(\n past_state.index_select(0, beam_idx)\n for past_state in layer_past), )\n return reordered_past\n\n def decode(self, x, src_mask):\n batch_size, *_ = x.shape\n beam_size, n_words = self.args.beam_size, self.args.n_words\n\n # repeat the src inputs beam_size times\n # [batch_size * beam_size, ...]\n x = x.unsqueeze(1).repeat(1, 
beam_size, 1, 1).view(-1, *x.shape[1:])\n src_mask = src_mask.unsqueeze(1).repeat(1, beam_size, 1).view(\n -1, *src_mask.shape[1:])\n # initialize the tgt inputs by <bos>\n # [batch_size * beam_size, seq_len]\n tgt = x.new_full((batch_size * beam_size, 1),\n self.args.bos_index,\n dtype=torch.long)\n # [batch_size * beam_size]\n active = src_mask.new_ones(batch_size * beam_size)\n # [batch_size]\n batches = tgt.new_tensor(range(batch_size)) * beam_size\n # accumulated scores\n scores = x.new_full((batch_size, self.args.beam_size),\n MIN).index_fill_(-1, tgt.new_tensor(0), 0).view(-1)\n\n def rank(scores, mask, k):\n scores = scores / mask.sum(-1).unsqueeze(\n -1)**self.args.length_penalty\n return scores.view(batch_size, -1).topk(k, -1)[1]\n\n if self.args.encoder != 'transformer':\n past_key_values = self.decoder(\n input_ids=torch.full_like(tgt[:, :1], self.args.eos_index),\n attention_mask=torch.ones_like(src_mask[:, :1]),\n encoder_hidden_states=x,\n encoder_attention_mask=src_mask,\n past_key_values=None,\n use_cache=True)[1]\n\n for t in range(1, min(self.args.max_len + 1, int(1.8 * x.shape[1]))):\n tgt_mask = tgt.ne(self.args.pad_index)\n if self.args.encoder == 'transformer':\n attn_mask = tgt_mask.new_ones(t, t).tril_()\n s_y = self.decoder(self.embed(tgt[active]), x[active],\n tgt_mask[active], src_mask[active],\n attn_mask)\n # [n_active, n_words]\n s_y = self.classifier(s_y[:, -1]).log_softmax(-1)\n # only allow finished sequences to get <pad>\n # [batch_size * beam_size, n_words]\n s_y = x.new_full((batch_size * beam_size, n_words),\n MIN).masked_scatter_(active.unsqueeze(-1),\n s_y)\n else:\n input_ids = tgt[:, -1:]\n s_y, new_past_key_values = self.decoder(\n input_ids=input_ids,\n attention_mask=torch.cat(\n (torch.ones_like(tgt_mask[:, :1]), tgt_mask), 1),\n encoder_hidden_states=x,\n encoder_attention_mask=src_mask,\n past_key_values=past_key_values,\n use_cache=True)[:2]\n del past_key_values\n past_key_values = new_past_key_values\n # [n_active, n_words]\n s_y = self.classifier(s_y[:, -1]).log_softmax(-1)\n # only allow finished sequences to get <pad>\n s_y[~active] = MIN\n\n s_y[~active, self.args.pad_index] = 0\n\n # [batch_size * beam_size, n_words]\n scores = scores.unsqueeze(-1) + s_y\n # [batch_size, beam_size]\n cands = rank(scores, tgt_mask, beam_size)\n # [batch_size * beam_size]\n scores = scores.view(batch_size, -1).gather(-1, cands).view(-1)\n # beams, tokens = cands // n_words, cands % n_words\n beams, tokens = cands.div(\n n_words, rounding_mode='floor'), (cands % n_words).view(-1, 1)\n indices = (batches.unsqueeze(-1) + beams).view(-1)\n # [batch_size * beam_size, seq_len + 1]\n tgt = torch.cat((tgt[indices], tokens), 1)\n past_key_values = self._reorder_cache(past_key_values, indices)\n active = tokens.ne(\n tokens.new_tensor(\n (self.args.eos_index, self.args.pad_index))).all(-1)\n\n if not active.any():\n break\n cands = rank(scores.view(-1, 1), tgt.ne(self.args.pad_index),\n self.args.topk)\n return tgt[(batches.unsqueeze(-1) + cands).view(-1)].view(\n batch_size, self.args.topk, -1)" }, { "identifier": "Field", "path": "gec/transform.py", "snippet": "class Field(supar.utils.Field):\n r\"\"\"\n Defines a datatype together with instructions for converting to :class:`~torch.Tensor`.\n :class:`Field` models common text processing datatypes that can be represented by tensors.\n It holds a :class:`~supar.utils.vocab.Vocab` object that defines the set of possible values\n for elements of the field and their corresponding numerical representations.\n The 
:class:`Field` object also holds other parameters relating to how a datatype\n should be numericalized, such as a tokenization method.\n\n Args:\n name (str):\n The name of the field.\n pad_token (str):\n The string token used as padding. Default: ``None``.\n unk_token (str):\n The string token used to represent OOV words. Default: ``None``.\n bos_token (str):\n A token that will be prepended to every example using this field, or ``None`` for no `bos_token`.\n Default: ``None``.\n eos_token (str):\n A token that will be appended to every example using this field, or ``None`` for no `eos_token`.\n lower (bool):\n Whether to lowercase the text in this field. Default: ``False``.\n use_vocab (bool):\n Whether to use a :class:`~supar.utils.vocab.Vocab` object.\n If ``False``, the data in this field should already be numerical.\n Default: ``True``.\n tokenize (function):\n The function used to tokenize strings using this field into sequential examples. Default: ``None``.\n fn (function):\n The function used for preprocessing the examples. Default: ``None``.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self.padding_side = kwargs.pop('padding_side') if 'padding_side' in kwargs else 'right'\n super().__init__(*args, **kwargs)\n\n def compose(self, batch: Iterable[torch.Tensor]) -> torch.Tensor:\n r\"\"\"\n Composes a batch of sequences into a padded tensor.\n\n Args:\n batch (Iterable[~torch.Tensor]):\n A list of tensors.\n\n Returns:\n A padded tensor converted to proper device.\n \"\"\"\n\n return pad(batch, self.pad_index, padding_side=self.padding_side).to(self.device, non_blocking=True)" }, { "identifier": "Text", "path": "gec/transform.py", "snippet": "class Text(Transform):\n\n fields = ['SRC', 'TGT']\n\n def __init__(\n self,\n SRC: Optional[Union[Field, Iterable[Field]]] = None,\n TGT: Optional[Union[Field, Iterable[Field]]] = None\n ) -> Text:\n super().__init__()\n\n self.SRC = SRC\n self.TGT = TGT\n\n @property\n def src(self):\n return self.SRC,\n\n @property\n def tgt(self):\n return self.TGT,\n\n def load(\n self,\n data: Union[str, Iterable],\n lang: Optional[str] = None,\n **kwargs\n ) -> Iterable[TextSentence]:\n r\"\"\"\n Loads the data in Text-X format.\n Also supports for loading data from Text-U file with comments and non-integer IDs.\n\n Args:\n data (str or Iterable):\n A filename or a list of instances.\n lang (str):\n Language code (e.g., ``en``) or language name (e.g., ``English``) for the text to tokenize.\n ``None`` if tokenization is not required.\n Default: ``None``.\n\n Returns:\n A list of :class:`TextSentence` instances.\n \"\"\"\n\n if lang is not None:\n tokenizer = Tokenizer(lang)\n if isinstance(data, str) and os.path.exists(data):\n f = open(data)\n if data.endswith('.txt'):\n lines = (i\n for s in f\n if len(s) > 1\n for i in StringIO((s.split() if lang is None else tokenizer(s)) + '\\n'))\n else:\n lines = f\n else:\n if lang is not None:\n data = [tokenizer(s) for s in ([data] if isinstance(data, str) else data)]\n else:\n data = [data] if isinstance(data[0], str) else data\n lines = (i for s in data for i in StringIO(s + '\\n'))\n\n index, sentence = 0, []\n for line in lines:\n line = line.strip()\n if len(line) == 0:\n sentence = TextSentence(self, sentence, index)\n yield sentence\n index += 1\n sentence = []\n else:\n sentence.append(line)" }, { "identifier": "Tree", "path": "gec/transform.py", "snippet": "class Tree(Transform):\n\n fields = ['SRC', 'TGT', 'SRCERROR', 'TGTERROR']\n\n def __init__(\n self,\n SRC: Optional[Union[Field, 
Iterable[Field]]] = None,\n TGT: Optional[Union[Field, Iterable[Field]]] = None,\n SRCERROR: Optional[Union[Field, Iterable[Field]]] = None,\n TGTERROR: Optional[Union[Field, Iterable[Field]]] = None,\n **kwargs\n ) -> Tree:\n super().__init__()\n self.error_schema = kwargs.pop('error_schema') if 'error_schema' in kwargs else 'last'\n self.fine_error_type = kwargs.pop('fine_error_type') if 'fine_error_type' in kwargs else False\n\n self.SRC = SRC\n self.TGT = TGT\n self.SRCERROR = SRCERROR\n self.TGTERROR = TGTERROR\n\n @property\n def src(self):\n return self.SRC, self.TGT\n\n @property\n def tgt(self):\n return self.SRCERROR, self.TGTERROR\n\n def load(\n self,\n data: Union[str, Iterable],\n lang: Optional[str] = None,\n **kwargs\n ) -> Iterable[TextSentence]:\n r\"\"\"\n Loads the data in Text-X format.\n Also supports for loading data from Text-U file with comments and non-integer IDs.\n\n Args:\n data (Union[str, Iterable]):\n A filename or a list of instances.\n lang (str):\n Language code (e.g., ``en``) or language name (e.g., ``English``) for the text to tokenize.\n ``None`` if tokenization is not required.\n Default: ``None``.\n\n Returns:\n A list of :class:`TextSentence` instances.\n \"\"\"\n\n if lang is not None:\n tokenizer = Tokenizer(lang)\n if isinstance(data, str) and os.path.exists(data):\n f = open(data)\n if data.endswith('.txt'):\n lines = (i\n for s in f\n if len(s) > 1\n for i in StringIO((s.split() if lang is None else tokenizer(s)) + '\\n'))\n else:\n lines = f\n else:\n if lang is not None:\n data = [tokenizer(s) for s in ([data] if isinstance(data, str) else data)]\n else:\n data = [data] if isinstance(data[0], str) else data\n lines = (i for s in data for i in StringIO(s + '\\n'))\n\n def consume(lines, chunksize=10000):\n index, sentence, chunk = 0, [], []\n for line in lines:\n line = line.strip()\n if len(line) == 0:\n chunk.append((sentence, index))\n if len(chunk) == chunksize:\n yield chunk\n chunk = []\n index += 1\n sentence = []\n else:\n sentence.append(line)\n if len(chunk) > 0:\n yield chunk\n\n @contextmanager\n def cache(lines):\n global global_transform\n global_transform = self\n ftemp = tempfile.mkdtemp()\n fbin = os.path.join(ftemp, 'data')\n try:\n yield ((chunk, f\"{fbin}.{i}\") for i, chunk in enumerate(consume(lines))), fbin\n finally:\n if dist.is_initialized() and not is_master():\n dist.barrier()\n del global_transform\n shutil.rmtree(ftemp)\n\n with cache(lines) as (chunks, fbin):\n if is_master():\n def process(chunk, fb):\n sentences = [TreeSentence(global_transform, *s) for s in progress_bar(chunk)]\n sentences = [s for s in sentences if s.vaild]\n return binarize({'sentences': sentences}, fb)[0]\n with mp.Pool(32) as pool:\n results = [pool.apply_async(process, (chunk, fb)) for chunk, fb in chunks]\n binarize((r.get() for r in results), fbin, merge=True)\n if dist.is_initialized() and not is_master():\n fbin = gather(fbin)[0]\n dist.barrier()\n for s in debinarize(fbin, meta=True)['sentences']:\n yield debinarize(fbin, s)" } ]
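The PerplexityMetric and SpanMetric snippets in the context above reduce to a few closed-form quantities: perplexity is recovered from the mean per-token loss, and the ERRANT-style edit scores are a precision/recall pair combined with beta = 0.5 so that precision is weighted higher than recall. A compact sketch of those formulas; the counts and loss value below are made up.

import math

# perplexity from the mean per-token negative log-likelihood (natural log);
# the snippet writes it as 2 ** (loss / ln 2), which equals e ** loss
loss = 2.1
ppl = math.pow(2, loss / math.log(2))
assert abs(ppl - math.exp(loss)) < 1e-9

# ERRANT-style edit-level scores with F_{0.5} (precision-weighted)
tp, fp, fn = 40, 10, 30
p = tp / (tp + fp)
r = tp / (tp + fn)
beta = 0.5
f_beta = (1 + beta**2) * p * r / (beta**2 * p + r)
print(f"P={p:.2%} R={r:.2%} F0.5={f_beta:.2%}")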
import os
import shutil
import tempfile
import math
import dill
import torch
import torch.distributed as dist
from datetime import datetime, timedelta
from typing import Iterable, Union
from gec.data import Dataset
from gec.fn import map_token_ids
from supar.parser import Parser
from supar.utils import Config
from supar.utils.common import MIN, NUL, UNK
from supar.utils.field import RawField
from supar.utils.fn import set_rng_state
from supar.utils.logging import get_logger, init_logger, progress_bar
from supar.utils.metric import Metric
from supar.utils.optim import PolynomialLR
from supar.utils.parallel import DistributedDataParallel as DDP, gather, is_dist
from supar.utils.parallel import is_master
from supar.utils.tokenizer import TransformerTokenizer
from supar.utils.transform import AttachJuxtaposeTree, Batch
from torch.cuda.amp import GradScaler
from torch.optim import AdamW
from torch.optim.lr_scheduler import ExponentialLR
from torch.nn.functional import embedding
from .metric import PerplexityMetric, SpanMetric
from .model import Seq2SeqDetectModel, Seq2SeqModel
from .transform import Field, Text, Tree
from torch.distributed.algorithms.ddp_comm_hooks.default_hooks import fp16_compress_hook
from torch.distributed.algorithms.ddp_comm_hooks.default_hooks import fp16_compress_hook
from transformers import AutoTokenizer, GPT2LMHeadModel
12,034
# -*- coding: utf-8 -*- logger = get_logger(__name__) class Seq2SeqParser(Parser): NAME = 'seq2seq'
# -*- coding: utf-8 -*- logger = get_logger(__name__) class Seq2SeqParser(Parser): NAME = 'seq2seq'
MODEL = Seq2SeqModel
5
2023-10-18 10:55:33+00:00
16k
jianlanluo/SAQ
vqn/conservative_sac_main.py
[ { "identifier": "VQN", "path": "vqn/vqn.py", "snippet": "class VQN(object):\n\n @staticmethod\n def get_default_config(updates=None):\n config = ConfigDict()\n config.embedding_dim = 128\n config.codebook_size = 64\n config.commitment_cost = 1.0\n config.quantization_cost = 1.0\n config.entropy_loss_ratio = 0.0\n config.entropy_loss_type = \"softmax\"\n config.entropy_temperature = 1.0\n config.vqvae_arch = '512-512'\n config.action_only_quantization = False\n config.reconstruction_loss_type = 'l2'\n config.vqvae_lr = 3e-4\n\n config.discount = 0.99\n config.qf_arch = '512-512'\n config.qf_lr = 3e-4\n config.target_update_period = 200\n config.reset_qf = False\n config.td_loss_weight = 1.0\n\n config.bc_loss_weight = 0.0\n\n config.action_selection_threshold = 0.0\n\n config.cql_temp = 1.0\n config.cql_min_q_weight = 0.0\n \n config.qf_weight_decay = 0.0\n\n config.q_value_penalty_weight = 0.0\n config.q_value_penalty_type = 'l1'\n config.q_value_penalty_aggregation = 'mean'\n\n if updates is not None:\n config.update(ConfigDict(updates).copy_and_resolve_references())\n return config\n\n def __init__(self, config, observation_dim, action_dim):\n self.config = self.get_default_config(config)\n self.observation_dim = observation_dim\n self.action_dim = action_dim\n\n self.vqvae = ActionVQVAE(\n observation_dim=self.observation_dim,\n action_dim=self.action_dim,\n embedding_dim=self.config.embedding_dim,\n codebook_size=self.config.codebook_size,\n commitment_cost=self.config.commitment_cost,\n quantization_cost=self.config.quantization_cost,\n entropy_loss_ratio=self.config.entropy_loss_ratio,\n entropy_loss_type=self.config.entropy_loss_type,\n entropy_temperature=self.config.entropy_temperature,\n arch=self.config.vqvae_arch,\n action_only_quantization=self.config.action_only_quantization,\n reconstruction_loss_type=self.config.reconstruction_loss_type,\n )\n\n self._vqvae_train_state = TrainState.create(\n params=self.vqvae.init(\n next_rng(self.vqvae.rng_keys()),\n jnp.zeros((1, observation_dim)),\n jnp.zeros((1, action_dim)),\n train=True\n ),\n tx=optax.adam(self.config.vqvae_lr),\n apply_fn=None,\n )\n self._vqvae_total_steps = 0\n\n self.qf = FullyConnectedNetwork(\n output_dim=self.config.codebook_size,\n arch=self.config.qf_arch,\n )\n\n qf_params = self.qf.init(\n next_rng(self.qf.rng_keys()),\n jnp.zeros((1, observation_dim)),\n )\n\n self._qf_optimizer = optax.adam(self.config.qf_lr)\n self._qf_train_state = DQNTrainState.create(\n params=qf_params,\n target_params=deepcopy(qf_params),\n tx=optax.adamw(self.config.qf_lr, self.config.qf_weight_decay),\n apply_fn=None,\n )\n self._dqn_total_steps = 0\n\n self._sampler_policy = VQSamplerPolicy(\n self.qf, self.vqvae,\n self._qf_train_state.params, self._vqvae_train_state.params\n )\n\n\n def train_vqvae(self, batch):\n self._vqvae_train_state, metrics = self._vqvae_train_step(\n next_rng(), self._vqvae_train_state, batch\n )\n self._vqvae_total_steps += 1\n return metrics\n\n @partial(jax.jit, static_argnames=('self', ))\n def _vqvae_train_step(self, rng, train_state, batch):\n observations = batch['observations']\n actions = batch['actions']\n rng_generator = JaxRNG(rng)\n\n @partial(jax.grad, has_aux=True)\n def grad_fn(train_params):\n reconstructed, result_dict = self.vqvae.apply(\n train_params,\n observations,\n actions,\n train=True,\n )\n return result_dict['loss'], result_dict\n\n grads, aux_values = grad_fn(train_state.params)\n new_train_state = train_state.apply_gradients(grads=grads)\n metrics = 
collect_jax_metrics(\n aux_values,\n ['loss', 'reconstruction_loss', 'quantizer_loss', 'e_latent_loss', 'q_latent_loss',\n 'entropy_loss', 'action_prior_loss', 'action_prior_accuracy'],\n )\n return new_train_state, metrics\n\n def train_dqn(self, batch, bc=False):\n self._qf_train_state, metrics = self._dqn_train_step(\n next_rng(), self._qf_train_state, self._vqvae_train_state, batch,\n bc\n )\n self._dqn_total_steps += 1\n return metrics\n\n @partial(jax.jit, static_argnames=('self', 'bc'))\n def _dqn_train_step(self, rng, qf_train_state, vqvae_train_state, batch, bc=False):\n observations = batch['observations']\n original_actions = batch['actions']\n rewards = batch['rewards']\n next_observations = batch['next_observations']\n dones = batch['dones']\n rng_generator = JaxRNG(rng)\n\n actions = self.vqvae.apply(\n vqvae_train_state.params,\n observations,\n original_actions,\n method=self.vqvae.encode\n )\n\n @partial(jax.grad, has_aux=True)\n def grad_fn(train_params):\n def select_by_action(q_vals, actions):\n return jnp.squeeze(\n jnp.take_along_axis(\n q_vals, jnp.expand_dims(actions, -1), axis=-1\n ),\n axis=-1\n )\n\n def select_actions(params, observations):\n q_values = self.qf.apply(params, observations)\n action_priors = jax.nn.softmax(\n self.vqvae.apply(\n vqvae_train_state.params,\n observations,\n method=self.vqvae.action_prior_logits\n ),\n axis=-1\n )\n action_selection_threshold = jnp.minimum(\n jnp.amax(action_priors, axis=-1, keepdims=True),\n self.config.action_selection_threshold\n )\n action_mask = (\n action_priors >= action_selection_threshold\n ).astype(jnp.float32)\n masked_q_values = (\n action_mask * q_values + (1.0 - action_mask) * jnp.min(q_values)\n )\n return jnp.argmax(masked_q_values, axis=-1)\n\n\n q_values = self.qf.apply(train_params, observations)\n current_actions_q_values = select_by_action(q_values, actions)\n next_q_values = self.qf.apply(qf_train_state.target_params, next_observations)\n next_actions = select_actions(train_params, next_observations)\n target_q_values = select_by_action(next_q_values, next_actions)\n\n td_target = rewards + (1. 
- dones) * self.config.discount * target_q_values\n\n td_loss = mse_loss(current_actions_q_values, jax.lax.stop_gradient(td_target))\n loss = self.config.td_loss_weight * td_loss\n\n current_actions = jnp.argmax(q_values, axis=-1)\n max_q_values = jnp.max(q_values, axis=-1)\n advantage = max_q_values - current_actions_q_values\n\n policy_dataset_aggrement_rate = jnp.mean(current_actions == actions)\n reconstructed_current_actions = self.vqvae.apply(\n vqvae_train_state.params,\n observations,\n current_actions,\n method=self.vqvae.decode\n )\n current_action_mse = jnp.sum(\n jnp.square(reconstructed_current_actions - original_actions),\n axis=-1\n ).mean()\n\n bc_loss = jnp.mean(optax.softmax_cross_entropy_with_integer_labels(q_values, actions))\n loss = loss + self.config.bc_loss_weight * bc_loss\n\n cql_lse_q_values = self.config.cql_temp * jax.scipy.special.logsumexp(\n q_values / self.config.cql_temp, axis=-1\n )\n cql_min_q_loss = jnp.mean(cql_lse_q_values - current_actions_q_values)\n loss = loss + self.config.cql_min_q_weight * cql_min_q_loss\n\n if self.config.q_value_penalty_aggregation == 'none':\n aggregated_q_values = q_values\n elif self.config.q_value_penalty_aggregation == 'mean':\n aggregated_q_values = jnp.mean(q_values)\n else:\n raise ValueError('Unsupport value penalty aggregation type!')\n\n if self.config.q_value_penalty_type == 'l1':\n q_value_penalty_loss = jnp.mean(jnp.abs(aggregated_q_values))\n elif self.config.q_value_penalty_type == 'l2':\n q_value_penalty_loss = jnp.mean(jnp.square(aggregated_q_values))\n else:\n raise ValueError('Unsupport value penalty type!')\n\n loss = loss + self.config.q_value_penalty_weight * q_value_penalty_loss\n\n if bc:\n loss = bc_loss\n\n return loss, locals()\n\n grads, aux_values = grad_fn(qf_train_state.params)\n new_target_params = jax.lax.cond(\n qf_train_state.step % self.config.target_update_period == self.config.target_update_period - 1,\n lambda: qf_train_state.params,\n lambda: qf_train_state.target_params,\n )\n if self.config.reset_qf:\n def reset_qf_params():\n qf_params = self.qf.init(\n rng_generator(self.qf.rng_keys()),\n jnp.zeros((1, self.observation_dim)),\n )\n return DQNTrainState.create(\n params=qf_params,\n target_params=new_target_params,\n tx=self._qf_optimizer,\n apply_fn=None,\n )\n\n new_qf_train_state = jax.lax.cond(\n qf_train_state.step % self.config.target_update_period == self.config.target_update_period - 1,\n reset_qf_params,\n lambda: qf_train_state.apply_gradients(grads=grads, target_params=new_target_params)\n )\n else:\n new_qf_train_state = qf_train_state.apply_gradients(\n grads=grads, target_params=new_target_params\n )\n\n metrics = collect_jax_metrics(\n aux_values,\n ['loss', 'current_actions_q_values', 'max_q_values', 'target_q_values',\n 'advantage', 'td_target', 'td_loss', 'cql_lse_q_values', 'cql_min_q_loss',\n 'policy_dataset_aggrement_rate', 'bc_loss', 'current_action_mse',\n 'q_value_penalty_loss'],\n )\n\n return new_qf_train_state, metrics\n\n def get_sampler_policy(self):\n return self._sampler_policy.update_params(\n self._qf_train_state.params, self._vqvae_train_state.params\n )" }, { "identifier": "ConservativeSAC", "path": "vqn/conservative_sac.py", "snippet": "class ConservativeSAC(object):\n\n @staticmethod\n def get_default_config(updates=None):\n config = ConfigDict()\n config.discount = 0.99\n config.alpha_multiplier = 0.0\n config.use_automatic_entropy_tuning = False\n config.backup_entropy = False\n config.target_entropy = 0.0\n config.policy_lr = 3e-4\n 
config.policy_weight_decay = 0.0\n config.qf_lr = 3e-4\n config.qf_weight_decay = 0.0\n config.optimizer_type = 'adam'\n config.soft_target_update_rate = 5e-3\n config.use_cql = False\n config.cql_n_actions = 10\n config.cql_importance_sample = True\n config.cql_lagrange = False\n config.cql_target_action_gap = 1.0\n config.cql_temp = 1.0\n config.cql_min_q_weight = 5.0\n config.cql_max_target_backup = False\n config.cql_clip_diff_min = -np.inf\n config.cql_clip_diff_max = np.inf\n\n if updates is not None:\n config.update(ConfigDict(updates).copy_and_resolve_references())\n return config\n\n def __init__(self, config, policy, qf):\n self.config = self.get_default_config(config)\n self.policy = policy\n self.qf = qf\n self.observation_dim = policy.observation_dim\n self.action_dim = policy.action_dim\n\n self._train_states = {}\n\n optimizer_class = {\n 'adam': optax.adam,\n 'sgd': optax.sgd,\n }[self.config.optimizer_type]\n\n policy_params = self.policy.init(\n next_rng(self.policy.rng_keys()),\n jnp.zeros((10, self.observation_dim))\n )\n self._train_states['policy'] = TrainState.create(\n params=policy_params,\n tx=optax.adamw(self.config.qf_lr, self.config.policy_weight_decay),\n apply_fn=None\n )\n\n qf1_params = self.qf.init(\n next_rng(self.qf.rng_keys()),\n jnp.zeros((10, self.observation_dim)),\n jnp.zeros((10, self.action_dim))\n )\n self._train_states['qf1'] = TrainState.create(\n params=qf1_params,\n tx=optax.adamw(self.config.qf_lr, self.config.qf_weight_decay),\n apply_fn=None,\n )\n qf2_params = self.qf.init(\n next_rng(self.qf.rng_keys()),\n jnp.zeros((10, self.observation_dim)),\n jnp.zeros((10, self.action_dim))\n )\n self._train_states['qf2'] = TrainState.create(\n params=qf2_params,\n tx=optax.adamw(self.config.qf_lr, self.config.qf_weight_decay),\n apply_fn=None,\n )\n self._target_qf_params = deepcopy({'qf1': qf1_params, 'qf2': qf2_params})\n\n model_keys = ['policy', 'qf1', 'qf2']\n\n if self.config.use_automatic_entropy_tuning:\n self.log_alpha = Scalar(0.0)\n self._train_states['log_alpha'] = TrainState.create(\n params=self.log_alpha.init(next_rng()),\n tx=optimizer_class(self.config.policy_lr),\n apply_fn=None\n )\n model_keys.append('log_alpha')\n\n if self.config.cql_lagrange:\n self.log_alpha_prime = Scalar(1.0)\n self._train_states['log_alpha_prime'] = TrainState.create(\n params=self.log_alpha_prime.init(next_rng()),\n tx=optimizer_class(self.config.qf_lr),\n apply_fn=None\n )\n model_keys.append('log_alpha_prime')\n\n self._model_keys = tuple(model_keys)\n self._total_steps = 0\n\n def train(self, batch, bc=False):\n self._total_steps += 1\n self._train_states, self._target_qf_params, metrics = self._train_step(\n self._train_states, self._target_qf_params, next_rng(), batch, bc\n )\n return metrics\n\n @partial(jax.jit, static_argnames=('self', 'bc'))\n def _train_step(self, train_states, target_qf_params, rng, batch, bc=False):\n rng_generator = JaxRNG(rng)\n\n def loss_fn(train_params):\n observations = batch['observations']\n actions = batch['actions']\n rewards = batch['rewards']\n next_observations = batch['next_observations']\n dones = batch['dones']\n\n loss_collection = {}\n\n @wrap_function_with_rng(rng_generator())\n def forward_policy(rng, *args, **kwargs):\n return self.policy.apply(\n *args, **kwargs,\n rngs=JaxRNG(rng)(self.policy.rng_keys())\n )\n\n @wrap_function_with_rng(rng_generator())\n def forward_qf(rng, *args, **kwargs):\n return self.qf.apply(\n *args, **kwargs,\n rngs=JaxRNG(rng)(self.qf.rng_keys())\n )\n\n new_actions, log_pi = 
forward_policy(train_params['policy'], observations)\n\n if self.config.use_automatic_entropy_tuning:\n alpha_loss = -self.log_alpha.apply(train_params['log_alpha']) * (log_pi + self.config.target_entropy).mean()\n loss_collection['log_alpha'] = alpha_loss\n alpha = jnp.exp(self.log_alpha.apply(train_params['log_alpha'])) * self.config.alpha_multiplier\n else:\n alpha_loss = 0.0\n alpha = self.config.alpha_multiplier\n\n \"\"\" Policy loss \"\"\"\n if bc:\n log_probs = forward_policy(train_params['policy'], observations, actions, method=self.policy.log_prob)\n policy_loss = (alpha*log_pi - log_probs).mean()\n else:\n q_new_actions = jnp.minimum(\n forward_qf(train_params['qf1'], observations, new_actions),\n forward_qf(train_params['qf2'], observations, new_actions),\n )\n policy_loss = (alpha*log_pi - q_new_actions).mean()\n\n loss_collection['policy'] = policy_loss\n\n \"\"\" Q function loss \"\"\"\n q1_pred = forward_qf(train_params['qf1'], observations, actions)\n q2_pred = forward_qf(train_params['qf2'], observations, actions)\n\n if self.config.cql_max_target_backup:\n new_next_actions, next_log_pi = forward_policy(\n train_params['policy'], next_observations, repeat=self.config.cql_n_actions\n )\n target_q_values = jnp.minimum(\n forward_qf(target_qf_params['qf1'], next_observations, new_next_actions),\n forward_qf(target_qf_params['qf2'], next_observations, new_next_actions),\n )\n max_target_indices = jnp.expand_dims(jnp.argmax(target_q_values, axis=-1), axis=-1)\n target_q_values = jnp.take_along_axis(target_q_values, max_target_indices, axis=-1).squeeze(-1)\n next_log_pi = jnp.take_along_axis(next_log_pi, max_target_indices, axis=-1).squeeze(-1)\n else:\n new_next_actions, next_log_pi = forward_policy(\n train_params['policy'], next_observations\n )\n target_q_values = jnp.minimum(\n forward_qf(target_qf_params['qf1'], next_observations, new_next_actions),\n forward_qf(target_qf_params['qf2'], next_observations, new_next_actions),\n )\n\n if self.config.backup_entropy:\n target_q_values = target_q_values - alpha * next_log_pi\n\n td_target = jax.lax.stop_gradient(\n rewards + (1. 
- dones) * self.config.discount * target_q_values\n )\n qf1_loss = mse_loss(q1_pred, td_target)\n qf2_loss = mse_loss(q2_pred, td_target)\n\n ### CQL\n if self.config.use_cql:\n batch_size = actions.shape[0]\n cql_random_actions = jax.random.uniform(\n rng_generator(), shape=(batch_size, self.config.cql_n_actions, self.action_dim),\n minval=-1.0, maxval=1.0\n )\n\n cql_current_actions, cql_current_log_pis = forward_policy(\n train_params['policy'], observations, repeat=self.config.cql_n_actions,\n )\n cql_next_actions, cql_next_log_pis = forward_policy(\n train_params['policy'], next_observations, repeat=self.config.cql_n_actions,\n )\n\n cql_q1_rand = forward_qf(train_params['qf1'], observations, cql_random_actions)\n cql_q2_rand = forward_qf(train_params['qf2'], observations, cql_random_actions)\n cql_q1_current_actions = forward_qf(train_params['qf1'], observations, cql_current_actions)\n cql_q2_current_actions = forward_qf(train_params['qf2'], observations, cql_current_actions)\n cql_q1_next_actions = forward_qf(train_params['qf1'], observations, cql_next_actions)\n cql_q2_next_actions = forward_qf(train_params['qf2'], observations, cql_next_actions)\n\n cql_cat_q1 = jnp.concatenate(\n [cql_q1_rand, jnp.expand_dims(q1_pred, 1), cql_q1_next_actions, cql_q1_current_actions], axis=1\n )\n cql_cat_q2 = jnp.concatenate(\n [cql_q2_rand, jnp.expand_dims(q2_pred, 1), cql_q2_next_actions, cql_q2_current_actions], axis=1\n )\n cql_std_q1 = jnp.std(cql_cat_q1, axis=1)\n cql_std_q2 = jnp.std(cql_cat_q2, axis=1)\n\n if self.config.cql_importance_sample:\n random_density = np.log(0.5 ** self.action_dim)\n cql_cat_q1 = jnp.concatenate(\n [cql_q1_rand - random_density,\n cql_q1_next_actions - cql_next_log_pis,\n cql_q1_current_actions - cql_current_log_pis],\n axis=1\n )\n cql_cat_q2 = jnp.concatenate(\n [cql_q2_rand - random_density,\n cql_q2_next_actions - cql_next_log_pis,\n cql_q2_current_actions - cql_current_log_pis],\n axis=1\n )\n\n cql_qf1_ood = (\n jax.scipy.special.logsumexp(cql_cat_q1 / self.config.cql_temp, axis=1)\n * self.config.cql_temp\n )\n cql_qf2_ood = (\n jax.scipy.special.logsumexp(cql_cat_q2 / self.config.cql_temp, axis=1)\n * self.config.cql_temp\n )\n\n \"\"\"Subtract the log likelihood of data\"\"\"\n cql_qf1_diff = jnp.clip(\n cql_qf1_ood - q1_pred,\n self.config.cql_clip_diff_min,\n self.config.cql_clip_diff_max,\n ).mean()\n cql_qf2_diff = jnp.clip(\n cql_qf2_ood - q2_pred,\n self.config.cql_clip_diff_min,\n self.config.cql_clip_diff_max,\n ).mean()\n\n if self.config.cql_lagrange:\n alpha_prime = jnp.clip(\n jnp.exp(self.log_alpha_prime.apply(train_params['log_alpha_prime'])),\n a_min=0.0, a_max=1000000.0\n )\n cql_min_qf1_loss = alpha_prime * self.config.cql_min_q_weight * (cql_qf1_diff - self.config.cql_target_action_gap)\n cql_min_qf2_loss = alpha_prime * self.config.cql_min_q_weight * (cql_qf2_diff - self.config.cql_target_action_gap)\n\n alpha_prime_loss = (-cql_min_qf1_loss - cql_min_qf2_loss)*0.5\n\n loss_collection['log_alpha_prime'] = alpha_prime_loss\n\n else:\n cql_min_qf1_loss = cql_qf1_diff * self.config.cql_min_q_weight\n cql_min_qf2_loss = cql_qf2_diff * self.config.cql_min_q_weight\n alpha_prime_loss = 0.0\n alpha_prime = 0.0\n\n qf1_loss = qf1_loss + cql_min_qf1_loss\n qf2_loss = qf2_loss + cql_min_qf2_loss\n\n loss_collection['qf1'] = qf1_loss\n loss_collection['qf2'] = qf2_loss\n return tuple(loss_collection[key] for key in self.model_keys), locals()\n\n train_params = {key: train_states[key].params for key in self.model_keys}\n (_, aux_values), grads = 
value_and_multi_grad(loss_fn, len(self.model_keys), has_aux=True)(train_params)\n\n new_train_states = {\n key: train_states[key].apply_gradients(grads=grads[i][key])\n for i, key in enumerate(self.model_keys)\n }\n new_target_qf_params = {}\n new_target_qf_params['qf1'] = update_target_network(\n new_train_states['qf1'].params, target_qf_params['qf1'],\n self.config.soft_target_update_rate\n )\n new_target_qf_params['qf2'] = update_target_network(\n new_train_states['qf2'].params, target_qf_params['qf2'],\n self.config.soft_target_update_rate\n )\n\n metrics = collect_jax_metrics(\n aux_values,\n ['log_pi', 'policy_loss', 'qf1_loss', 'qf2_loss', 'alpha_loss',\n 'alpha', 'q1_pred', 'q2_pred', 'target_q_values']\n )\n\n if self.config.use_cql:\n metrics.update(collect_jax_metrics(\n aux_values,\n ['cql_std_q1', 'cql_std_q2', 'cql_q1_rand', 'cql_q2_rand'\n 'cql_qf1_diff', 'cql_qf2_diff', 'cql_min_qf1_loss',\n 'cql_min_qf2_loss', 'cql_q1_current_actions', 'cql_q2_current_actions'\n 'cql_q1_next_actions', 'cql_q2_next_actions', 'alpha_prime',\n 'alpha_prime_loss'],\n 'cql'\n ))\n\n return new_train_states, new_target_qf_params, metrics\n\n @property\n def model_keys(self):\n return self._model_keys\n\n @property\n def train_states(self):\n return self._train_states\n\n @property\n def train_params(self):\n return {key: self.train_states[key].params for key in self.model_keys}\n\n @property\n def total_steps(self):\n return self._total_steps" }, { "identifier": "get_d4rl_dataset", "path": "vqn/replay_buffer.py", "snippet": "def get_d4rl_dataset(env):\n dataset = d4rl.qlearning_dataset(env)\n return dict(\n observations=dataset['observations'],\n actions=dataset['actions'],\n next_observations=dataset['next_observations'],\n rewards=dataset['rewards'],\n dones=dataset['terminals'].astype(np.float32),\n )" }, { "identifier": "subsample_batch", "path": "vqn/replay_buffer.py", "snippet": "def subsample_batch(batch, size):\n indices = np.random.randint(batch['observations'].shape[0], size=size)\n return index_batch(batch, indices)" }, { "identifier": "batch_to_jax", "path": "vqn/jax_utils.py", "snippet": "@jax.jit\ndef batch_to_jax(batch):\n return jax.tree_util.tree_map(jax.device_put, batch)" }, { "identifier": "TanhGaussianPolicy", "path": "vqn/model.py", "snippet": "class TanhGaussianPolicy(nn.Module):\n observation_dim: int\n action_dim: int\n arch: str = '256-256'\n orthogonal_init: bool = False\n log_std_multiplier: float = 1.0\n log_std_offset: float = -1.0\n use_tanh: bool = True\n\n def setup(self):\n self.base_network = FullyConnectedNetwork(\n output_dim=2 * self.action_dim, arch=self.arch, orthogonal_init=self.orthogonal_init\n )\n self.log_std_multiplier_module = Scalar(self.log_std_multiplier)\n self.log_std_offset_module = Scalar(self.log_std_offset)\n\n def log_prob(self, observations, actions):\n if actions.ndim == 3:\n observations = extend_and_repeat(observations, 1, actions.shape[1])\n base_network_output = self.base_network(observations)\n mean, log_std = jnp.split(base_network_output, 2, axis=-1)\n log_std = self.log_std_multiplier_module() * log_std + self.log_std_offset_module()\n log_std = jnp.clip(log_std, -20.0, 2.0)\n action_distribution = distrax.MultivariateNormalDiag(mean, jnp.exp(log_std))\n if self.use_tanh:\n action_distribution = distrax.Transformed(\n action_distribution, distrax.Block(distrax.Tanh(), ndims=1)\n )\n return action_distribution.log_prob(actions)\n\n def __call__(self, observations, deterministic=False, repeat=None):\n if repeat is not None:\n 
observations = extend_and_repeat(observations, 1, repeat)\n base_network_output = self.base_network(observations)\n mean, log_std = jnp.split(base_network_output, 2, axis=-1)\n log_std = self.log_std_multiplier_module() * log_std + self.log_std_offset_module()\n log_std = jnp.clip(log_std, -20.0, 2.0)\n action_distribution = distrax.MultivariateNormalDiag(mean, jnp.exp(log_std))\n if self.use_tanh:\n action_distribution = distrax.Transformed(\n action_distribution, distrax.Block(distrax.Tanh(), ndims=1)\n )\n if deterministic:\n samples = mean\n if self.use_tanh:\n samples = jnp.tanh(samples)\n log_prob = action_distribution.log_prob(samples)\n else:\n samples, log_prob = action_distribution.sample_and_log_prob(seed=self.make_rng('noise'))\n\n return samples, log_prob\n\n @nn.nowrap\n def rng_keys(self):\n return ('params', 'noise')" }, { "identifier": "FullyConnectedQFunction", "path": "vqn/model.py", "snippet": "class FullyConnectedQFunction(nn.Module):\n observation_dim: int\n action_dim: int\n arch: str = '256-256'\n orthogonal_init: bool = False\n\n @nn.compact\n @multiple_action_q_function\n def __call__(self, observations, actions):\n x = jnp.concatenate([observations, actions], axis=-1)\n x = FullyConnectedNetwork(output_dim=1, arch=self.arch, orthogonal_init=self.orthogonal_init)(x)\n return jnp.squeeze(x, -1)\n\n @nn.nowrap\n def rng_keys(self):\n return ('params', )" }, { "identifier": "SamplerPolicy", "path": "vqn/model.py", "snippet": "class SamplerPolicy(object):\n\n def __init__(self, policy, params):\n self.policy = policy\n self.params = params\n\n def update_params(self, params):\n self.params = params\n return self\n\n @partial(jax.jit, static_argnames=('self', 'deterministic'))\n def act(self, params, rng, observations, deterministic):\n return self.policy.apply(\n params, observations, deterministic, repeat=None,\n rngs=JaxRNG(rng)(self.policy.rng_keys())\n )\n\n def __call__(self, observations, deterministic=False):\n actions, _ = self.act(self.params, next_rng(), observations, deterministic=deterministic)\n assert jnp.all(jnp.isfinite(actions))\n return jax.device_get(actions)" }, { "identifier": "StepSampler", "path": "vqn/sampler.py", "snippet": "class StepSampler(object):\n\n def __init__(self, env, max_traj_length=1000):\n self.max_traj_length = max_traj_length\n self._env = env\n self._traj_steps = 0\n self._current_observation = self.env.reset()\n\n def sample(self, policy, n_steps, deterministic=False, replay_buffer=None):\n observations = []\n actions = []\n rewards = []\n next_observations = []\n dones = []\n\n for _ in range(n_steps):\n self._traj_steps += 1\n observation = self._current_observation\n action = policy(observation.reshape(1, -1), deterministic=deterministic).reshape(-1)\n next_observation, reward, done, _ = self.env.step(action)\n observations.append(observation)\n actions.append(action)\n rewards.append(reward)\n dones.append(done)\n next_observations.append(next_observation)\n\n if replay_buffer is not None:\n replay_buffer.add_sample(\n observation, action, reward, next_observation, done\n )\n\n self._current_observation = next_observation\n\n if done or self._traj_steps >= self.max_traj_length:\n self._traj_steps = 0\n self._current_observation = self.env.reset()\n\n return dict(\n observations=np.array(observations, dtype=np.float32),\n actions=np.array(actions, dtype=np.float32),\n rewards=np.array(rewards, dtype=np.float32),\n next_observations=np.array(next_observations, dtype=np.float32),\n dones=np.array(dones, dtype=np.float32),\n 
)\n\n @property\n def env(self):\n return self._env" }, { "identifier": "TrajSampler", "path": "vqn/sampler.py", "snippet": "class TrajSampler(object):\n\n def __init__(self, env, max_traj_length=1000):\n self.max_traj_length = max_traj_length\n self._env = env\n\n def sample(self, policy, n_trajs, replay_buffer=None, deterministic=False):\n trajs = []\n for _ in range(n_trajs):\n observations = []\n actions = []\n rewards = []\n next_observations = []\n dones = []\n\n observation = self.env.reset()\n\n for _ in range(self.max_traj_length):\n action = policy(observation.reshape(1, -1), deterministic=deterministic).reshape(-1)\n next_observation, reward, done, _ = self.env.step(action)\n observations.append(observation)\n actions.append(action)\n rewards.append(reward)\n dones.append(done)\n next_observations.append(next_observation)\n\n if replay_buffer is not None:\n replay_buffer.add_sample(\n observation, action, reward, next_observation, done\n )\n\n observation = next_observation\n\n if done:\n break\n\n trajs.append(dict(\n observations=np.array(observations, dtype=np.float32),\n actions=np.array(actions, dtype=np.float32),\n rewards=np.array(rewards, dtype=np.float32),\n next_observations=np.array(next_observations, dtype=np.float32),\n dones=np.array(dones, dtype=np.float32),\n ))\n\n return trajs\n\n @property\n def env(self):\n return self._env" }, { "identifier": "SequenceDataset", "path": "vqn/robomimic_utils.py", "snippet": "OBS_KEYS = (\"robot0_eef_pos\", \"robot0_eef_quat\", \"robot0_gripper_qpos\", \"object\")\nENV_TO_HORIZON_MAP = {'lift': 400,\n 'can': 400,\n 'square': 400,\n 'transport': 700,\n 'tool_hang': 700}\ndef make_dataset(dataset, env_name):\ndef process_robomimic_dataset(seq_dataset):\ndef get_robomimic_env(dataset_path, example_action, env_name):\n def __init__(self, env, horizon, example_action):\n def step(self, action):\n def reset(self):\n def render(self):\n def get_normalized_score(self, rewards):\n def _process_obs(self, obs):\ndef _check_lengths(dataset_dict: DatasetDict,\n dataset_len: Optional[int] = None) -> int:\ndef _subselect(dataset_dict: DatasetDict, index: np.ndarray) -> DatasetDict:\ndef _sample(dataset_dict: Union[np.ndarray, DatasetDict],\n indx: np.ndarray) -> DatasetDict:\n def __init__(self, dataset_dict: DatasetDict, seed: Optional[int] = None):\n def np_random(self) -> np.random.RandomState:\n def seed(self, seed: Optional[int] = None) -> list:\n def __len__(self) -> int:\n def sample(self,\n batch_size: int,\n keys: Optional[Iterable[str]] = None,\n indx: Optional[np.ndarray] = None) -> frozen_dict.FrozenDict:\n def split(self, ratio: float) -> Tuple['Dataset', 'Dataset']:\n def _trajectory_boundaries_and_returns(self) -> Tuple[list, list, list]:\n def filter(self,\n percentile: Optional[float] = None,\n threshold: Optional[float] = None):\n def normalize_returns(self, scaling: float = 1000):\n def __init__(self,\n dataset_dict: dict,\n clip_to_eps: bool = True,\n eps: float = 1e-5):\n def __init__(self,\n env: gym.Env,\n clip_to_eps: bool = True,\n eps: float = 1e-5,\n ignore_done: bool = False,\n custom_dataset: dict = None):\nclass RobosuiteGymWrapper():\nclass Dataset(object):\nclass OfflineDataset(Dataset):\nclass D4RLDataset(Dataset):" }, { "identifier": "Timer", "path": "vqn/utils.py", "snippet": "class Timer(object):\n\n def __init__(self):\n self._time = None\n\n def __enter__(self):\n self._start_time = time.time()\n return self\n\n def __exit__(self, exc_type, exc_value, exc_tb):\n self._time = time.time() - 
self._start_time\n\n def __call__(self):\n return self._time" }, { "identifier": "define_flags_with_default", "path": "vqn/utils.py", "snippet": "def define_flags_with_default(**kwargs):\n for key, val in kwargs.items():\n if isinstance(val, ConfigDict):\n config_flags.DEFINE_config_dict(key, val)\n elif isinstance(val, bool):\n # Note that True and False are instances of int.\n absl.flags.DEFINE_bool(key, val, 'automatically defined flag')\n elif isinstance(val, int):\n absl.flags.DEFINE_integer(key, val, 'automatically defined flag')\n elif isinstance(val, float):\n absl.flags.DEFINE_float(key, val, 'automatically defined flag')\n elif isinstance(val, str):\n absl.flags.DEFINE_string(key, val, 'automatically defined flag')\n else:\n raise ValueError('Incorrect value type')\n return kwargs" }, { "identifier": "set_random_seed", "path": "vqn/utils.py", "snippet": "def set_random_seed(seed):\n np.random.seed(seed)\n random.seed(seed)\n init_rng(seed)" }, { "identifier": "print_flags", "path": "vqn/utils.py", "snippet": "def print_flags(flags, flags_def):\n logging.info(\n 'Running training with hyperparameters: \\n{}'.format(\n pprint.pformat(\n ['{}: {}'.format(key, val) for key, val in get_user_flags(flags, flags_def).items()]\n )\n )\n )" }, { "identifier": "get_user_flags", "path": "vqn/utils.py", "snippet": "def get_user_flags(flags, flags_def):\n output = {}\n for key in flags_def:\n val = getattr(flags, key)\n if isinstance(val, ConfigDict):\n output.update(flatten_config_dict(val, prefix=key))\n else:\n output[key] = val\n\n return output" }, { "identifier": "prefix_metrics", "path": "vqn/utils.py", "snippet": "def prefix_metrics(metrics, prefix):\n return {\n '{}/{}'.format(prefix, key): value for key, value in metrics.items()\n }" }, { "identifier": "WandBLogger", "path": "vqn/utils.py", "snippet": "class WandBLogger(object):\n\n @staticmethod\n def get_default_config(updates=None):\n config = ConfigDict()\n config.online = False\n config.prefix = 'JaxCQL'\n config.project = ''\n config.output_dir = '/tmp/JaxCQL'\n config.random_delay = 0.0\n config.experiment_id = config_dict.placeholder(str)\n config.anonymous = config_dict.placeholder(str)\n config.notes = config_dict.placeholder(str)\n config.entity = config_dict.placeholder(str)\n\n if updates is not None:\n config.update(ConfigDict(updates).copy_and_resolve_references())\n return config\n\n def __init__(self, config, variant):\n self.config = self.get_default_config(config)\n\n if self.config.experiment_id is None:\n self.config.experiment_id = uuid.uuid4().hex\n\n if self.config.prefix != '':\n self.config.project = '{}--{}'.format(self.config.prefix, self.config.project)\n\n if self.config.output_dir == '':\n self.config.output_dir = tempfile.mkdtemp()\n else:\n self.config.output_dir = os.path.join(self.config.output_dir, self.config.experiment_id)\n os.makedirs(self.config.output_dir, exist_ok=True)\n\n self._variant = copy(variant)\n\n if 'hostname' not in self._variant:\n self._variant['hostname'] = gethostname()\n\n if self.config.random_delay > 0:\n time.sleep(np.random.uniform(0, self.config.random_delay))\n\n self.run = wandb.init(\n reinit=True,\n config=self._variant,\n project=self.config.project,\n dir=self.config.output_dir,\n entity=config.entity,\n id=self.config.experiment_id,\n anonymous=self.config.anonymous,\n notes=self.config.notes,\n settings=wandb.Settings(\n start_method=\"thread\",\n _disable_stats=True,\n ),\n mode='online' if self.config.online else 'offline',\n )\n\n def log(self, *args, 
**kwargs):\n self.run.log(*args, **kwargs)\n\n def save_pickle(self, obj, filename):\n with open(os.path.join(self.config.output_dir, filename), 'wb') as fout:\n pickle.dump(obj, fout)\n\n @property\n def experiment_id(self):\n return self.config.experiment_id\n\n @property\n def variant(self):\n return self.config.variant\n\n @property\n def output_dir(self):\n return self.config.output_dir" } ]
import os import time import uuid import numpy as np import pprint import jax import jax.numpy as jnp import flax import gym import d4rl import absl.app import absl.flags from copy import deepcopy from .vqn import VQN from .conservative_sac import ConservativeSAC from .replay_buffer import get_d4rl_dataset, subsample_batch from .jax_utils import batch_to_jax from .model import TanhGaussianPolicy, FullyConnectedQFunction, SamplerPolicy from .sampler import StepSampler, TrajSampler from .robomimic_utils import ( SequenceDataset, make_dataset, process_robomimic_dataset, D4RLDataset, get_robomimic_env, ENV_TO_HORIZON_MAP, OBS_KEYS ) from .utils import ( Timer, define_flags_with_default, set_random_seed, print_flags, get_user_flags, prefix_metrics, WandBLogger ) from viskit.logging import logger, setup_logger
11,036
FLAGS_DEF = define_flags_with_default( env='halfcheetah-medium-v2', algorithm='cql', max_traj_length=200, seed=42, save_model=False, batch_size=256, reward_scale=1.0, reward_bias=0.0, clip_action=0.999, policy_arch='256-256', qf_arch='256-256', orthogonal_init=False, policy_log_std_multiplier=1.0, policy_log_std_offset=-1.0, n_epochs=1000, bc_epochs=1000, n_train_step_per_epoch=1000, eval_period=10, eval_n_trajs=5, cql=ConservativeSAC.get_default_config(), logging=WandBLogger.get_default_config(), ) def main(argv): FLAGS = absl.flags.FLAGS variant = get_user_flags(FLAGS, FLAGS_DEF) wandb_logger = WandBLogger(config=FLAGS.logging, variant=variant) setup_logger( variant=variant, exp_id=wandb_logger.experiment_id, seed=FLAGS.seed, base_log_dir=FLAGS.logging.output_dir, include_exp_prefix_sub_dir=False ) set_random_seed(FLAGS.seed) if FLAGS.env in ENV_TO_HORIZON_MAP: dataset_path = f'./robomimic/datasets/{FLAGS.env}/low_dim_v141.hdf5' seq_dataset = SequenceDataset(hdf5_path=dataset_path, obs_keys=OBS_KEYS, dataset_keys=("actions", "rewards", "dones"), hdf5_cache_mode="all", load_next_obs=True) dataset = process_robomimic_dataset(seq_dataset) dataset = D4RLDataset(env=None, custom_dataset=dataset) example_ob = dataset.dataset_dict['observations'][0][np.newaxis] example_action = dataset.dataset_dict['actions'][0][np.newaxis] env = get_robomimic_env(dataset_path, example_action, FLAGS.env) max_len = ENV_TO_HORIZON_MAP[FLAGS.env] else: env = gym.make(FLAGS.env).unwrapped dataset = get_d4rl_dataset(env) dataset['rewards'] = dataset['rewards'] * FLAGS.reward_scale + FLAGS.reward_bias dataset['actions'] = np.clip(dataset['actions'], -FLAGS.clip_action, FLAGS.clip_action) max_len = FLAGS.max_traj_length example_ob = env.observation_space.sample()[np.newaxis] example_action = env.action_space.sample()[np.newaxis] eval_sampler = TrajSampler(env, max_len) observation_dim = example_ob.shape[1] action_dim = example_action.shape[1] dataset = make_dataset(dataset, FLAGS.env) policy = TanhGaussianPolicy( observation_dim, action_dim, FLAGS.policy_arch, FLAGS.orthogonal_init, FLAGS.policy_log_std_multiplier, FLAGS.policy_log_std_offset ) qf = FullyConnectedQFunction(observation_dim, action_dim, FLAGS.qf_arch, FLAGS.orthogonal_init) if FLAGS.cql.target_entropy >= 0.0: FLAGS.cql.target_entropy = -np.prod(eval_sampler.env.action_space.shape).item() sac = ConservativeSAC(FLAGS.cql, policy, qf) sampler_policy = SamplerPolicy(sac.policy, sac.train_params['policy']) viskit_metrics = {} for epoch in range(FLAGS.n_epochs): metrics = {'epoch': epoch} with Timer() as train_timer: for batch_idx in range(FLAGS.n_train_step_per_epoch): batch = dataset.sample(FLAGS.batch_size)
FLAGS_DEF = define_flags_with_default( env='halfcheetah-medium-v2', algorithm='cql', max_traj_length=200, seed=42, save_model=False, batch_size=256, reward_scale=1.0, reward_bias=0.0, clip_action=0.999, policy_arch='256-256', qf_arch='256-256', orthogonal_init=False, policy_log_std_multiplier=1.0, policy_log_std_offset=-1.0, n_epochs=1000, bc_epochs=1000, n_train_step_per_epoch=1000, eval_period=10, eval_n_trajs=5, cql=ConservativeSAC.get_default_config(), logging=WandBLogger.get_default_config(), ) def main(argv): FLAGS = absl.flags.FLAGS variant = get_user_flags(FLAGS, FLAGS_DEF) wandb_logger = WandBLogger(config=FLAGS.logging, variant=variant) setup_logger( variant=variant, exp_id=wandb_logger.experiment_id, seed=FLAGS.seed, base_log_dir=FLAGS.logging.output_dir, include_exp_prefix_sub_dir=False ) set_random_seed(FLAGS.seed) if FLAGS.env in ENV_TO_HORIZON_MAP: dataset_path = f'./robomimic/datasets/{FLAGS.env}/low_dim_v141.hdf5' seq_dataset = SequenceDataset(hdf5_path=dataset_path, obs_keys=OBS_KEYS, dataset_keys=("actions", "rewards", "dones"), hdf5_cache_mode="all", load_next_obs=True) dataset = process_robomimic_dataset(seq_dataset) dataset = D4RLDataset(env=None, custom_dataset=dataset) example_ob = dataset.dataset_dict['observations'][0][np.newaxis] example_action = dataset.dataset_dict['actions'][0][np.newaxis] env = get_robomimic_env(dataset_path, example_action, FLAGS.env) max_len = ENV_TO_HORIZON_MAP[FLAGS.env] else: env = gym.make(FLAGS.env).unwrapped dataset = get_d4rl_dataset(env) dataset['rewards'] = dataset['rewards'] * FLAGS.reward_scale + FLAGS.reward_bias dataset['actions'] = np.clip(dataset['actions'], -FLAGS.clip_action, FLAGS.clip_action) max_len = FLAGS.max_traj_length example_ob = env.observation_space.sample()[np.newaxis] example_action = env.action_space.sample()[np.newaxis] eval_sampler = TrajSampler(env, max_len) observation_dim = example_ob.shape[1] action_dim = example_action.shape[1] dataset = make_dataset(dataset, FLAGS.env) policy = TanhGaussianPolicy( observation_dim, action_dim, FLAGS.policy_arch, FLAGS.orthogonal_init, FLAGS.policy_log_std_multiplier, FLAGS.policy_log_std_offset ) qf = FullyConnectedQFunction(observation_dim, action_dim, FLAGS.qf_arch, FLAGS.orthogonal_init) if FLAGS.cql.target_entropy >= 0.0: FLAGS.cql.target_entropy = -np.prod(eval_sampler.env.action_space.shape).item() sac = ConservativeSAC(FLAGS.cql, policy, qf) sampler_policy = SamplerPolicy(sac.policy, sac.train_params['policy']) viskit_metrics = {} for epoch in range(FLAGS.n_epochs): metrics = {'epoch': epoch} with Timer() as train_timer: for batch_idx in range(FLAGS.n_train_step_per_epoch): batch = dataset.sample(FLAGS.batch_size)
metrics.update(prefix_metrics(sac.train(batch, bc=epoch < FLAGS.bc_epochs), 'sac'))
16
2023-10-18 06:31:20+00:00
16k
SLDGroup/G-CASCADE
lib/networks.py
[ { "identifier": "pvt_v2_b2", "path": "lib/pvtv2.py", "snippet": "class pvt_v2_b2(PyramidVisionTransformerImpr):\n def __init__(self, **kwargs):\n super(pvt_v2_b2, self).__init__(\n patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4],\n qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 4, 6, 3], sr_ratios=[8, 4, 2, 1],\n drop_rate=0.0, drop_path_rate=0.1)" }, { "identifier": "pvt_v2_b5", "path": "lib/pvtv2.py", "snippet": "class pvt_v2_b5(PyramidVisionTransformerImpr):\n def __init__(self, **kwargs):\n super(pvt_v2_b5, self).__init__(\n patch_size=4, embed_dims=[64, 128, 320, 512], num_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4],\n qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 6, 40, 3], sr_ratios=[8, 4, 2, 1],\n drop_rate=0.0, drop_path_rate=0.1)" }, { "identifier": "pvt_v2_b0", "path": "lib/pvtv2.py", "snippet": "class pvt_v2_b0(PyramidVisionTransformerImpr):\n def __init__(self, **kwargs):\n super(pvt_v2_b0, self).__init__(\n patch_size=4, embed_dims=[32, 64, 160, 256], num_heads=[1, 2, 5, 8], mlp_ratios=[8, 8, 4, 4],\n qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1],\n drop_rate=0.0, drop_path_rate=0.1)" }, { "identifier": "CUP", "path": "lib/decoders.py", "snippet": "class CUP(nn.Module):\n def __init__(self, channels=[512,320,128,64]):\n super(CUP,self).__init__()\n \n self.ConvBlock4 = conv_block(ch_in=channels[0], ch_out=channels[0])\n\n self.Up3 = up_conv(ch_in=channels[0],ch_out=channels[1])\n self.ConvBlock3 = conv_block(ch_in=2*channels[1], ch_out=channels[1])\n\n self.Up2 = up_conv(ch_in=channels[1],ch_out=channels[2])\n self.ConvBlock2 = conv_block(ch_in=2*channels[2], ch_out=channels[2])\n \n self.Up1 = up_conv(ch_in=channels[2],ch_out=channels[3])\n self.ConvBlock1 = conv_block(ch_in=2*channels[3], ch_out=channels[3])\n\n def forward(self,x, skips):\n\n d4 = self.ConvBlock4(x)\n \n # decoding + concat path\n d3 = self.Up3(d4)\n d3 = torch.cat((skips[0],d3),dim=1)\n \n d3 = self.ConvBlock3(d3)\n \n d2 = self.Up2(d3)\n d2 = torch.cat((skips[1],d2),dim=1)\n d2 = self.ConvBlock2(d2)\n\n d1 = self.Up1(d2)\n d1 = torch.cat((skips[2],d1),dim=1)\n d1 = self.ConvBlock1(d1)\n return d4, d3, d2, d1 " }, { "identifier": "CASCADE", "path": "lib/decoders.py", "snippet": "class CASCADE(nn.Module):\n def __init__(self, channels=[512,320,128,64]):\n super(CASCADE,self).__init__()\n \n self.Conv_1x1 = nn.Conv2d(channels[0],channels[0],kernel_size=1,stride=1,padding=0)\n self.ConvBlock4 = conv_block(ch_in=channels[0], ch_out=channels[0])\n\t\n self.Up3 = up_conv(ch_in=channels[0],ch_out=channels[1])\n self.AG3 = Attention_block(F_g=channels[1],F_l=channels[1],F_int=channels[2])\n self.ConvBlock3 = conv_block(ch_in=channels[1], ch_out=channels[1])\n\n self.Up2 = up_conv(ch_in=channels[1],ch_out=channels[2])\n self.AG2 = Attention_block(F_g=channels[2],F_l=channels[2],F_int=channels[3])\n self.ConvBlock2 = conv_block(ch_in=channels[2], ch_out=channels[2])\n \n self.Up1 = up_conv(ch_in=channels[2],ch_out=channels[3])\n self.AG1 = Attention_block(F_g=channels[3],F_l=channels[3],F_int=int(channels[3]/2))\n self.ConvBlock1 = conv_block(ch_in=channels[3], ch_out=channels[3])\n \n self.CA4 = ChannelAttention(channels[0])\n self.CA3 = ChannelAttention(channels[1])\n self.CA2 = ChannelAttention(channels[2])\n self.CA1 = ChannelAttention(channels[3])\n \n self.SA = SPA()\n \n def forward(self,x, skips):\n \n d4 = self.Conv_1x1(x)\n \n # CAM4\n d4 = 
self.CA4(d4)*d4\n d4 = self.SA(d4)*d4 \n d4 = self.ConvBlock4(d4)\n \n # upconv3\n d3 = self.Up3(d4)\n \n # AG3\n x3 = self.AG3(g=d3,x=skips[0])\n \n # Concat 3\n d3 = d3 + x3\n \n # CAM3\n d3 = self.CA3(d3)*d3\n d3 = self.SA(d3)*d3 \n d3 = self.ConvBlock3(d3)\n \n # upconv2\n d2 = self.Up2(d3)\n \n # AG2\n x2 = self.AG2(g=d2,x=skips[1])\n \n # Concat 2\n d2 = d2 + x2\n \n # CAM2\n d2 = self.CA2(d2)*d2\n d2 = self.SA(d2)*d2\n #print(d2.shape)\n d2 = self.ConvBlock2(d2)\n \n # upconv1\n d1 = self.Up1(d2)\n \n #print(skips[2])\n # AG1\n x1 = self.AG1(g=d1,x=skips[2])\n \n # Concat 1\n d1 = d1 + x1\n \n # CAM1\n d1 = self.CA1(d1)*d1\n d1 = self.SA(d1)*d1\n d1 = self.ConvBlock1(d1)\n return d4, d3, d2, d1" }, { "identifier": "CASCADE_Cat", "path": "lib/decoders.py", "snippet": "class CASCADE_Cat(nn.Module):\n def __init__(self, channels=[512,320,128,64]):\n super(CASCADE_Cat,self).__init__()\n \n self.Conv_1x1 = nn.Conv2d(channels[0],channels[0],kernel_size=1,stride=1,padding=0)\n self.ConvBlock4 = conv_block(ch_in=channels[0], ch_out=channels[0])\n\t\n self.Up3 = up_conv(ch_in=channels[0],ch_out=channels[1])\n self.AG3 = Attention_block(F_g=channels[1],F_l=channels[1],F_int=channels[2])\n self.ConvBlock3 = conv_block(ch_in=2*channels[1], ch_out=channels[1])\n\n self.Up2 = up_conv(ch_in=channels[1],ch_out=channels[2])\n self.AG2 = Attention_block(F_g=channels[2],F_l=channels[2],F_int=channels[3])\n self.ConvBlock2 = conv_block(ch_in=2*channels[2], ch_out=channels[2])\n \n self.Up1 = up_conv(ch_in=channels[2],ch_out=channels[3])\n self.AG1 = Attention_block(F_g=channels[3],F_l=channels[3],F_int=int(channels[3]/2))\n self.ConvBlock1 = conv_block(ch_in=2*channels[3], ch_out=channels[3])\n \n self.CA4 = ChannelAttention(channels[0])\n self.CA3 = ChannelAttention(2*channels[1])\n self.CA2 = ChannelAttention(2*channels[2])\n self.CA1 = ChannelAttention(2*channels[3])\n \n self.SA = SPA()\n \n def forward(self,x, skips):\n \n d4 = self.Conv_1x1(x)\n \n # CAM4\n d4 = self.CA4(d4)*d4\n d4 = self.SA(d4)*d4 \n d4 = self.ConvBlock4(d4)\n \n # upconv3\n d3 = self.Up3(d4)\n \n # AG3\n x3 = self.AG3(g=d3,x=skips[0])\n \n # Concat 3\n d3 = torch.cat((x3,d3),dim=1)\n \n # CAM3\n d3 = self.CA3(d3)*d3\n d3 = self.SA(d3)*d3 \n d3 = self.ConvBlock3(d3)\n \n # upconv2\n d2 = self.Up2(d3)\n \n # AG2\n x2 = self.AG2(g=d2,x=skips[1])\n \n # Concat 2\n d2 = torch.cat((x2,d2),dim=1)\n \n # CAM2\n d2 = self.CA2(d2)*d2\n d2 = self.SA(d2)*d2\n #print(d2.shape)\n d2 = self.ConvBlock2(d2)\n \n # upconv1\n d1 = self.Up1(d2)\n \n #print(skips[2])\n # AG1\n x1 = self.AG1(g=d1,x=skips[2])\n \n # Concat 1\n d1 = torch.cat((x1,d1),dim=1)\n \n # CAM1\n d1 = self.CA1(d1)*d1\n d1 = self.SA(d1)*d1\n d1 = self.ConvBlock1(d1)\n return d4, d3, d2, d1 " }, { "identifier": "GCUP", "path": "lib/decoders.py", "snippet": "class GCUP(nn.Module):\n def __init__(self, channels=[512,320,128,64], img_size=224, drop_path_rate=0.0, k=11, padding=5, conv='mr', gcb_act='gelu', activation='relu'):\n super(GCUP,self).__init__()\n \n # Up-convolution block (UCB) parameters\n self.ucb_ks = 3\n self.ucb_pad = 1\n self.ucb_stride = 1\n self.activation = activation\n \n # Graph convolution block (GCB) parameters\n self.padding=padding\n self.k = k # neighbor num (default:9)\n self.conv = conv # graph conv layer {edge, mr, sage, gin} # default mr\n self.gcb_act = gcb_act # activation layer for graph convolution block {relu, prelu, leakyrelu, gelu, hswish}\n self.gcb_norm = 'batch' # batch or instance normalization for graph convolution block {batch, 
instance}\n self.bias = True # bias of conv layer True or False\n self.dropout = 0.0 # dropout rate\n self.use_dilation = True # use dilated knn or not\n self.epsilon = 0.2 # stochastic epsilon for gcn\n self.use_stochastic = False # stochastic for gcn, True or False\n self.drop_path = drop_path_rate\n self.reduce_ratios = [1,1,4, 2]\n self.dpr = [self.drop_path,self.drop_path,self.drop_path,self.drop_path] # stochastic depth decay rule \n self.num_knn = [self.k,self.k,self.k,self.k] # number of knn's k\n self.max_dilation = 18 // max(self.num_knn)\n self.HW = img_size // 4 * img_size // 4\n \n self.gcb4 = nn.Sequential(GCB(channels[0], self.num_knn[0], min(0 // 4 + 1, self.max_dilation), self.conv, self.act, self.gcb_norm,\n self.bias, self.use_stochastic, self.epsilon, self.reduce_ratios[0], n=self.HW//(4*4*4), drop_path=self.dpr[0],\n relative_pos=True, padding=self.padding),\n )\n\t\n self.ucb3 = UCB(ch_in=channels[0],ch_out=channels[1], kernel_size=self.ucb_ks, stride=self.ucb_stride, padding=self.ucb_pad, groups=channels[0], activation=self.activation)\n self.gcb3 = nn.Sequential(GCB(channels[1], self.num_knn[1], min(3 // 4 + 1, self.max_dilation), self.conv, self.gcb_act, self.gcb_norm,\n self.bias, self.use_stochastic, self.epsilon, self.reduce_ratios[1], n=self.HW//(4*4), drop_path=self.dpr[1],\n relative_pos=True, padding=self.padding),\n )\n\n self.ucb2 = UCB(ch_in=channels[1],ch_out=channels[2], kernel_size=self.ucb_ks, stride=self.ucb_stride, padding=self.ucb_pad, groups=channels[1], activation=self.activation)\n self.gcb2 = nn.Sequential(GCB(channels[2], self.num_knn[2], min(8 // 4 + 1, self.max_dilation), self.conv, self.gcb_act, self.gcb_norm,\n self.bias, self.use_stochastic, self.epsilon, self.reduce_ratios[2], n=self.HW//(4), drop_path=self.dpr[2],\n relative_pos=True, padding=self.padding),\n )\n \n self.ucb1 = UCB(ch_in=channels[2],ch_out=channels[3], kernel_size=self.ucb_ks, stride=self.ucb_stride, padding=self.ucb_pad, groups=channels[2], activation=self.activation)\n self.gcb1 = nn.Sequential(GCB(channels[3], self.num_knn[3], min(11 // 4 + 1, self.max_dilation), self.conv, self.gcb_act, self.gcb_norm,\n self.bias, self.use_stochastic, self.epsilon, self.reduce_ratios[3], n=self.HW, drop_path=self.dpr[3],\n relative_pos=True, padding=self.padding),\n )\n \n def forward(self,x, skips):\n \n # GCAM4\n d4 = self.gcb4(x) \n \n # UCB3\n d3 = self.ucb3(d4)\n \n # Aggregation 3\n d3 = d3 + skips[0]\n \n # GCAM3\n d3 = self.gcb3(d3) \n \n # UCB2\n d2 = self.ucb2(d3) \n \n # Aggregation 2\n d2 = d2 + skips[1] \n \n # GCAM2\n d2 = self.gcb2(d2)\n \n # UCB1\n d1 = self.ucb1(d2)\n \n # Aggregation 1\n d1 = d1 + skips[2]\n \n # GCAM1\n d1 = self.gcb1(d1)\n \n return d4, d3, d2, d1" }, { "identifier": "GCUP_Cat", "path": "lib/decoders.py", "snippet": "class GCUP_Cat(nn.Module):\n def __init__(self, channels=[512,320,128,64], img_size=224, drop_path_rate=0.0, k=11, padding=5, conv='mr', gcb_act='gelu', activation='relu'):\n super(GCUP_Cat,self).__init__()\n \n # Up-convolution block (UCB) parameters\n self.ucb_ks = 3\n self.ucb_pad = 1\n self.ucb_stride = 1\n self.activation = activation\n \n # Graph convolution block (GCB) parameters\n self.padding=padding\n self.k = k # neighbor num (default:9)\n self.conv = conv # graph conv layer {edge, mr, sage, gin} # default mr\n self.gcb_act = gcb_act # activation layer for graph convolution block {relu, prelu, leakyrelu, gelu, hswish}\n self.gcb_norm = 'batch' # batch or instance normalization for graph convolution block {batch, 
instance}\n self.bias = True # bias of conv layer True or False\n self.dropout = 0.0 # dropout rate\n self.use_dilation = True # use dilated knn or not\n self.epsilon = 0.2 # stochastic epsilon for gcn\n self.use_stochastic = False # stochastic for gcn, True or False\n self.drop_path = drop_path_rate\n self.reduce_ratios = [1,1,4, 2]\n self.dpr = [self.drop_path,self.drop_path,self.drop_path,self.drop_path] # stochastic depth decay rule \n self.num_knn = [self.k,self.k,self.k,self.k] # number of knn's k\n self.max_dilation = 18 // max(self.num_knn)\n self.HW = img_size // 4 * img_size // 4\n \n self.gcb4 = nn.Sequential(GCB(channels[0], self.num_knn[0], min(0 // 4 + 1, self.max_dilation), self.conv, self.gcb_act, self.gcb_norm,\n self.bias, self.use_stochastic, self.epsilon, self.reduce_ratios[0], n=self.HW//(4*4*4), drop_path=self.dpr[0],\n relative_pos=True, padding=self.padding),\n )\n\t\n self.ucb3 = UCB(ch_in=channels[0],ch_out=channels[1], kernel_size=self.ucb_ks, stride=self.ucb_stride, padding=self.ucb_pad, groups=channels[0], activation=self.activation)\n self.gcb3 = nn.Sequential(GCB(2*channels[1], self.num_knn[1], min(3 // 4 + 1, self.max_dilation), self.conv, self.gcb_act, self.gcb_norm,\n self.bias, self.use_stochastic, self.epsilon, self.reduce_ratios[1], n=self.HW//(4*4), drop_path=self.dpr[1],\n relative_pos=True, padding=self.padding),\n )\n\n self.ucb2 = UCB(ch_in=2*channels[1],ch_out=channels[2], kernel_size=self.ucb_ks, stride=self.ucb_stride, padding=self.ucb_pad, groups=channels[1], activation=self.activation)\n self.gcb2 = nn.Sequential(GCB(2*channels[2], self.num_knn[2], min(8 // 4 + 1, self.max_dilation), self.conv, self.gcb_act, self.gcb_norm,\n self.bias, self.use_stochastic, self.epsilon, self.reduce_ratios[2], n=self.HW//(4), drop_path=self.dpr[2],\n relative_pos=True, padding=self.padding),\n )\n \n self.ucb1 = UCB(ch_in=2*channels[2],ch_out=channels[3], kernel_size=self.ucb_ks, stride=self.ucb_stride, padding=self.ucb_pad, groups=channels[2], activation=self.activation)\n self.gcb1 = nn.Sequential(GCB(2*channels[3], self.num_knn[3], min(11 // 4 + 1, self.max_dilation), self.conv, self.gcb_act, self.gcb_norm,\n self.bias, self.use_stochastic, self.epsilon, self.reduce_ratios[3], n=self.HW, drop_path=self.dpr[3],\n relative_pos=True, padding=self.padding),\n ) \n \n def forward(self,x, skips):\n \n # GCAM4\n d4 = self.gcb4(x) \n \n # UCB3\n d3 = self.ucb3(d4)\n\n # Aggregation 3\n d3 = torch.cat((skips[0],d3),dim=1)\n \n # GCAM3\n d3 = self.gcb3(d3)\n\n # UCB2\n d2 = self.ucb2(d3)\n\n # Aggregation 2\n d2 = torch.cat((skips[1],d2),dim=1)\n \n # GCAM2\n d2 = self.gcb2(d2)\n \n # UCB1\n d1 = self.ucb1(d2)\n \n # Aggregation 1\n d1 = torch.cat((skips[2],d1),dim=1)\n \n # GCAM1\n d1 = self.gcb1(d1)\n\n return d4, d3, d2, d1" }, { "identifier": "GCASCADE", "path": "lib/decoders.py", "snippet": "class GCASCADE(nn.Module):\n def __init__(self, channels=[512,320,128,64], drop_path_rate=0.0, img_size=224, k=11, padding=5, conv='mr', gcb_act='gelu', activation='relu'):\n super(GCASCADE,self).__init__()\n\n # Up-convolution block (UCB) parameters\n self.ucb_ks = 3\n self.ucb_pad = 1\n self.ucb_stride = 1\n self.activation = activation\n \n # Graph convolution block (GCB) parameters\n self.padding=padding\n self.k = k # neighbor num (default:9)\n self.conv = conv # graph conv layer {edge, mr, sage, gin} # default mr\n self.gcb_act = gcb_act # activation layer for graph convolution block {relu, prelu, leakyrelu, gelu, hswish}\n self.gcb_norm = 'batch' # batch or instance 
normalization for graph convolution block {batch, instance}\n self.bias = True # bias of conv layer True or False\n self.dropout = 0.0 # dropout rate\n self.use_dilation = True # use dilated knn or not\n self.epsilon = 0.2 # stochastic epsilon for gcn\n self.use_stochastic = False # stochastic for gcn, True or False\n self.drop_path = drop_path_rate\n self.reduce_ratios = [1,1,4,2]\n self.dpr = [self.drop_path,self.drop_path,self.drop_path,self.drop_path] # stochastic depth decay rule \n self.num_knn = [self.k,self.k,self.k,self.k] # number of knn's k\n self.max_dilation = 18 // max(self.num_knn)\n self.HW = img_size // 4 * img_size // 4\n \n self.gcb4 = nn.Sequential(GCB(channels[0], self.num_knn[0], min(0 // 4 + 1, self.max_dilation), self.conv, self.gcb_act, self.gcb_norm,\n self.bias, self.use_stochastic, self.epsilon, self.reduce_ratios[0], n=self.HW//(4*4*4), drop_path=self.dpr[0],\n relative_pos=True, padding=self.padding),\n )\n\t\n self.ucb3 = UCB(ch_in=channels[0], ch_out=channels[1], kernel_size=self.ucb_ks, stride=self.ucb_stride, padding=self.ucb_pad, groups=channels[0], activation=self.activation)\n self.gcb3 = nn.Sequential(GCB(channels[1], self.num_knn[1], min(3 // 4 + 1, self.max_dilation), self.conv, self.gcb_act, self.gcb_norm,\n self.bias, self.use_stochastic, self.epsilon, self.reduce_ratios[1], n=self.HW//(4*4), drop_path=self.dpr[1],\n relative_pos=True, padding=self.padding),\n )\n\n self.ucb2 = UCB(ch_in=channels[1], ch_out=channels[2], kernel_size=self.ucb_ks, stride=self.ucb_stride, padding=self.ucb_pad, groups=channels[1], activation=self.activation)\n self.gcb2 = nn.Sequential(GCB(channels[2], self.num_knn[2], min(8 // 4 + 1, self.max_dilation), self.conv, self.gcb_act, self.gcb_norm,\n self.bias, self.use_stochastic, self.epsilon, self.reduce_ratios[2], n=self.HW//(4), drop_path=self.dpr[2],\n relative_pos=True, padding=self.padding),\n )\n \n self.ucb1 = UCB(ch_in=channels[2], ch_out=channels[3], kernel_size=self.ucb_ks, stride=self.ucb_stride, padding=self.ucb_pad, groups=channels[2], activation=self.activation)\n self.gcb1 = nn.Sequential(GCB(channels[3], self.num_knn[3], min(11 // 4 + 1, self.max_dilation), self.conv, self.gcb_act, self.gcb_norm,\n self.bias, self.use_stochastic, self.epsilon, self.reduce_ratios[3], n=self.HW, drop_path=self.dpr[3],\n relative_pos=True, padding=self.padding),\n )\n\n self.spa = SPA()\n\n \n def forward(self,x, skips):\n \n # GCAM4\n d4 = self.gcb4(x) \n d4 = self.spa(d4)*d4 \n \n # UCB3\n d3 = self.ucb3(d4)\n \n # Aggregation 3\n d3 = d3 + skips[0] #torch.cat((skips[0],d3),dim=1)\n \n # GCAM3\n d3 = self.gcb3(d3)\n d3 = self.spa(d3)*d3 \n \n # UCB2\n d2 = self.ucb2(d3)\n \n # Aggregation 2\n d2 = d2 + skips[1] #torch.cat((skips[1],d2),dim=1)\n \n # GCAM2\n d2 = self.gcb2(d2)\n d2 = self.spa(d2)*d2\n \n \n # UCB1\n d1 = self.ucb1(d2)\n \n # Aggregation 1\n d1 = d1 + skips[2] #torch.cat((skips[2],d1),dim=1)\n \n # GCAM1\n d1 = self.gcb1(d1)\n d1 = self.spa(d1)*d1\n \n return d4, d3, d2, d1" }, { "identifier": "GCASCADE_Cat", "path": "lib/decoders.py", "snippet": "class GCASCADE_Cat(nn.Module):\n def __init__(self, channels=[512,320,128,64], drop_path_rate=0.0, img_size=224, k=11, padding=5, conv='mr', gcb_act='gelu', activation='relu'):\n super(GCASCADE_Cat,self).__init__()\n\n # Up-convolution block (UCB) parameters\n self.ucb_ks = 3\n self.ucb_pad = 1\n self.ucb_stride = 1\n self.activation = activation\n \n # Graph convolution block (GCB) parameters\n self.padding=padding\n self.k = k # neighbor num (default:9)\n 
self.conv = conv # graph conv layer {edge, mr, sage, gin} # default mr\n self.gcb_act = gcb_act # activation layer for graph convolution block {relu, prelu, leakyrelu, gelu, hswish}\n self.gcb_norm = 'batch' # batch or instance normalization for graph convolution block {batch, instance}\n self.bias = True # bias of conv layer True or False\n self.dropout = 0.0 # dropout rate\n self.use_dilation = True # use dilated knn or not\n self.epsilon = 0.2 # stochastic epsilon for gcn\n self.use_stochastic = False # stochastic for gcn, True or False\n self.drop_path = drop_path_rate\n self.reduce_ratios = [1,1,4,2]\n self.dpr = [self.drop_path,self.drop_path,self.drop_path,self.drop_path] # stochastic depth decay rule \n self.num_knn = [self.k,self.k,self.k,self.k] # number of knn's k\n self.max_dilation = 18 // max(self.num_knn)\n self.HW = img_size // 4 * img_size // 4\n \n self.gcb4 = nn.Sequential(GCB(channels[0], self.num_knn[0], min(0 // 4 + 1, self.max_dilation), self.conv, self.gcb_act, self.gcb_norm,\n self.bias, self.use_stochastic, self.epsilon, self.reduce_ratios[0], n=self.HW//(4*4*4), drop_path=self.dpr[0],\n relative_pos=True, padding=self.padding),\n )\n\t\n self.ucb3 = UCB(ch_in=channels[0], ch_out=channels[1], kernel_size=self.ucb_ks, stride = self.ucb_stride, padding = self.ucb_pad, groups = channels[0], activation=self.activation)\n self.gcb3 = nn.Sequential(GCB(channels[1]*2, self.num_knn[1], min(3 // 4 + 1, self.max_dilation), self.conv, self.gcb_act, self.gcb_norm,\n self.bias, self.use_stochastic, self.epsilon, self.reduce_ratios[1], n=self.HW//(4*4), drop_path=self.dpr[1],\n relative_pos=True, padding=self.padding),\n )\n\n self.ucb2 = UCB(ch_in=channels[1]*2, ch_out=channels[2], kernel_size=self.ucb_ks, stride = self.ucb_stride, padding = self.ucb_pad, groups = channels[1], activation=self.activation)\n self.gcb2 = nn.Sequential(GCB(channels[2]*2, self.num_knn[2], min(8 // 4 + 1, self.max_dilation), self.conv, self.gcb_act, self.gcb_norm,\n self.bias, self.use_stochastic, self.epsilon, self.reduce_ratios[2], n=self.HW//(4), drop_path=self.dpr[2],\n relative_pos=True, padding=self.padding),\n )\n \n self.ucb1 = UCB(ch_in=channels[2]*2, ch_out=channels[3], kernel_size=self.ucb_ks, stride = self.ucb_stride, padding = self.ucb_pad, groups = channels[2], activation=self.activation)\n self.gcb1 = nn.Sequential(GCB(channels[3]*2, self.num_knn[3], min(11 // 4 + 1, self.max_dilation), self.conv, self.gcb_act, self.gcb_norm,\n self.bias, self.use_stochastic, self.epsilon, self.reduce_ratios[3], n=self.HW, drop_path=self.dpr[3],\n relative_pos=True, padding=self.padding),\n ) \n \n self.spa = SPA()\n\n \n def forward(self,x, skips): \n \n # GCAM4\n d4 = self.gcb4(x) \n d4 = self.spa(d4)*d4 \n \n # UCB3\n d3 = self.ucb3(d4)\n \n # Aggregation 3\n d3 = torch.cat((skips[0],d3),dim=1)\n \n # GCAM3\n d3 = self.gcb3(d3)\n d3 = self.spa(d3)*d3 \n \n # ucb2\n d2 = self.ucb2(d3)\n \n # Aggregation 2\n d2 = torch.cat((skips[1],d2),dim=1)\n \n # GCAM2\n d2 = self.gcb2(d2)\n d2 = self.spa(d2)*d2\n \n \n # ucb1\n d1 = self.ucb1(d2)\n \n # Aggregation 1\n d1 = torch.cat((skips[2],d1),dim=1)\n \n # GCAM1\n d1 = self.gcb1(d1)\n d1 = self.spa(d1)*d1\n \n return d4, d3, d2, d1" }, { "identifier": "pvig_ti_224_gelu", "path": "lib/pyramid_vig.py", "snippet": "@register_model\ndef pvig_ti_224_gelu(pretrained=False, **kwargs):\n class OptInit:\n def __init__(self, num_classes=1000, drop_path_rate=0.0, **kwargs):\n self.k = 9 # neighbor num (default:9)\n self.conv = 'mr' # graph conv layer {edge, mr}\n 
self.act = 'gelu' # activation layer {relu, prelu, leakyrelu, gelu, hswish}\n self.norm = 'batch' # batch or instance normalization {batch, instance}\n self.bias = True # bias of conv layer True or False\n self.dropout = 0.0 # dropout rate\n self.use_dilation = True # use dilated knn or not\n self.epsilon = 0.2 # stochastic epsilon for gcn\n self.use_stochastic = False # stochastic for gcn, True or False\n self.drop_path = drop_path_rate\n self.blocks = [2,2,6,2] # number of basic blocks in the backbone\n self.channels = [48, 96, 240, 384] # number of channels of deep features\n self.n_classes = num_classes # Dimension of out_channels\n self.emb_dims = 1024 # Dimension of embeddings\n\n opt = OptInit(**kwargs)\n model = DeepGCN(opt)\n model.default_cfg = default_cfgs['vig_224_gelu']\n return model" }, { "identifier": "pvig_s_224_gelu", "path": "lib/pyramid_vig.py", "snippet": "@register_model\ndef pvig_s_224_gelu(pretrained=False, **kwargs):\n class OptInit:\n def __init__(self, num_classes=1000, drop_path_rate=0.0, **kwargs):\n self.k = 9 # neighbor num (default:9)\n self.conv = 'mr' # graph conv layer {edge, mr}\n self.act = 'gelu' # activation layer {relu, prelu, leakyrelu, gelu, hswish}\n self.norm = 'batch' # batch or instance normalization {batch, instance}\n self.bias = True # bias of conv layer True or False\n self.dropout = 0.0 # dropout rate\n self.use_dilation = True # use dilated knn or not\n self.epsilon = 0.2 # stochastic epsilon for gcn\n self.use_stochastic = False # stochastic for gcn, True or False\n self.drop_path = drop_path_rate\n self.blocks = [2,2,6,2] # number of basic blocks in the backbone\n self.channels = [80, 160, 400, 640] # number of channels of deep features\n self.n_classes = num_classes # Dimension of out_channels\n self.emb_dims = 1024 # Dimension of embeddings\n\n opt = OptInit(**kwargs)\n model = DeepGCN(opt)\n model.default_cfg = default_cfgs['vig_224_gelu']\n return model" }, { "identifier": "pvig_m_224_gelu", "path": "lib/pyramid_vig.py", "snippet": "@register_model\ndef pvig_m_224_gelu(pretrained=False, **kwargs):\n class OptInit:\n def __init__(self, num_classes=1000, drop_path_rate=0.0, **kwargs):\n self.k = 9 # neighbor num (default:9)\n self.conv = 'mr' # graph conv layer {edge, mr}\n self.act = 'gelu' # activation layer {relu, prelu, leakyrelu, gelu, hswish}\n self.norm = 'batch' # batch or instance normalization {batch, instance}\n self.bias = True # bias of conv layer True or False\n self.dropout = 0.0 # dropout rate\n self.use_dilation = True # use dilated knn or not\n self.epsilon = 0.2 # stochastic epsilon for gcn\n self.use_stochastic = False # stochastic for gcn, True or False\n self.drop_path = drop_path_rate\n self.blocks = [2,2,16,2] # number of basic blocks in the backbone\n self.channels = [96, 192, 384, 768] # number of channels of deep features\n self.n_classes = num_classes # Dimension of out_channels\n self.emb_dims = 1024 # Dimension of embeddings\n\n opt = OptInit(**kwargs)\n model = DeepGCN(opt)\n model.default_cfg = default_cfgs['vig_224_gelu']\n return model" }, { "identifier": "pvig_b_224_gelu", "path": "lib/pyramid_vig.py", "snippet": "@register_model\ndef pvig_b_224_gelu(pretrained=False, **kwargs):\n class OptInit:\n def __init__(self, num_classes=1000, drop_path_rate=0.0, **kwargs):\n self.k = 9 # neighbor num (default:9)\n self.conv = 'mr' # graph conv layer {edge, mr}\n self.act = 'gelu' # activation layer {relu, prelu, leakyrelu, gelu, hswish}\n self.norm = 'batch' # batch or instance normalization {batch, 
instance}\n self.bias = True # bias of conv layer True or False\n self.dropout = 0.0 # dropout rate\n self.use_dilation = True # use dilated knn or not\n self.epsilon = 0.2 # stochastic epsilon for gcn\n self.use_stochastic = False # stochastic for gcn, True or False\n self.drop_path = drop_path_rate\n self.blocks = [2,2,18,2] # number of basic blocks in the backbone\n self.channels = [128, 256, 512, 1024] # number of channels of deep features\n self.n_classes = num_classes # Dimension of out_channels\n self.emb_dims = 1024 # Dimension of embeddings\n\n opt = OptInit(**kwargs)\n model = DeepGCN(opt)\n model.default_cfg = default_cfgs['vig_b_224_gelu']\n return model" }, { "identifier": "maxvit_tiny_rw_224", "path": "lib/maxxvit_4out.py", "snippet": "@register_model\ndef maxvit_tiny_rw_224(pretrained=False, **kwargs):\n return _create_maxxvit('maxvit_tiny_rw_224', pretrained=pretrained, **kwargs)" }, { "identifier": "maxvit_rmlp_tiny_rw_256", "path": "lib/maxxvit_4out.py", "snippet": "@register_model\ndef maxvit_rmlp_tiny_rw_256(pretrained=False, **kwargs):\n return _create_maxxvit('maxvit_rmlp_tiny_rw_256', pretrained=pretrained, **kwargs)" }, { "identifier": "maxxvit_rmlp_small_rw_256", "path": "lib/maxxvit_4out.py", "snippet": "@register_model\ndef maxxvit_rmlp_small_rw_256(pretrained=False, **kwargs):\n return _create_maxxvit('maxxvit_rmlp_small_rw_256', pretrained=pretrained, **kwargs)" }, { "identifier": "maxvit_rmlp_small_rw_224", "path": "lib/maxxvit_4out.py", "snippet": "@register_model\ndef maxvit_rmlp_small_rw_224(pretrained=False, **kwargs):\n return _create_maxxvit('maxvit_rmlp_small_rw_224', pretrained=pretrained, **kwargs)" } ]
import os import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import timm import logging from scipy import ndimage from lib.pvtv2 import pvt_v2_b2, pvt_v2_b5, pvt_v2_b0 from lib.decoders import CUP, CASCADE, CASCADE_Cat, GCUP, GCUP_Cat, GCASCADE, GCASCADE_Cat from lib.pyramid_vig import pvig_ti_224_gelu, pvig_s_224_gelu, pvig_m_224_gelu, pvig_b_224_gelu from lib.maxxvit_4out import maxvit_tiny_rw_224 as maxvit_tiny_rw_224_4out from lib.maxxvit_4out import maxvit_rmlp_tiny_rw_256 as maxvit_rmlp_tiny_rw_256_4out from lib.maxxvit_4out import maxxvit_rmlp_small_rw_256 as maxxvit_rmlp_small_rw_256_4out from lib.maxxvit_4out import maxvit_rmlp_small_rw_224 as maxvit_rmlp_small_rw_224_4out
13,348
('GCUP_decoder: ', sum([m.numel() for m in self.decoder.parameters()]))) # Prediction heads initialization self.out_head1 = nn.Conv2d(self.channels[0], self.n_class, 1) self.out_head2 = nn.Conv2d(self.channels[1], self.n_class, 1) self.out_head3 = nn.Conv2d(self.channels[2], self.n_class, 1) self.out_head4 = nn.Conv2d(self.channels[3], self.n_class, 1) def forward(self, x): # if grayscale input, convert to 3 channels if x.size()[1] == 1: x = self.conv_1cto3c(x) # transformer backbone as encoder x1, x2, x3, x4 = self.backbone(x) # decoder x1_o, x2_o, x3_o, x4_o = self.decoder(x4, [x3, x2, x1]) # prediction heads p1 = self.out_head1(x1_o) p2 = self.out_head2(x2_o) p3 = self.out_head3(x3_o) p4 = self.out_head4(x4_o) p1 = F.interpolate(p1, scale_factor=32, mode='bilinear') p2 = F.interpolate(p2, scale_factor=16, mode='bilinear') p3 = F.interpolate(p3, scale_factor=8, mode='bilinear') p4 = F.interpolate(p4, scale_factor=4, mode='bilinear') return p1, p2, p3, p4 class PVT_GCASCADE(nn.Module): def __init__(self, n_class=1, img_size=224, k=11, padding=5, conv='mr', gcb_act='gelu', activation='relu', skip_aggregation='additive'): super(PVT_GCASCADE, self).__init__() self.skip_aggregation = skip_aggregation self.n_class = n_class # conv block to convert single channel to 3 channels self.conv_1cto3c = nn.Sequential( nn.Conv2d(1, 3, kernel_size=1), nn.BatchNorm2d(3), nn.ReLU(inplace=True) ) # backbone network initialization with pretrained weight self.backbone = pvt_v2_b2() # [64, 128, 320, 512] path = './pretrained_pth/pvt/pvt_v2_b2.pth' save_model = torch.load(path) model_dict = self.backbone.state_dict() state_dict = {k: v for k, v in save_model.items() if k in model_dict.keys()} model_dict.update(state_dict) self.backbone.load_state_dict(model_dict) self.channels = [512, 320, 128, 64] # decoder initialization if self.skip_aggregation == 'additive': self.decoder = GCASCADE(channels=self.channels, img_size=img_size, k=k, padding=padding, conv=conv, gcb_act=gcb_act, activation=activation) elif self.skip_aggregation == 'concatenation': self.decoder = GCASCADE_Cat(channels=self.channels, img_size=img_size, k=k, padding=padding, conv=conv, gcb_act=gcb_act, activation=activation) self.channels = [self.channels[0], self.channels[1]*2, self.channels[2]*2, self.channels[3]*2] else: print('No implementation found for the skip_aggregation ' + self.skip_aggregation + '. 
Continuing with the default additive aggregation.') self.decoder = GCASCADE(channels=self.channels, img_size=img_size, k=k, padding=padding, conv=conv, gcb_act=gcb_act, activation=activation) print('Model %s created, param count: %d' % ('GCASCADE decoder: ', sum([m.numel() for m in self.decoder.parameters()]))) # Prediction heads initialization self.out_head1 = nn.Conv2d(self.channels[0], self.n_class, 1) self.out_head2 = nn.Conv2d(self.channels[1], self.n_class, 1) self.out_head3 = nn.Conv2d(self.channels[2], self.n_class, 1) self.out_head4 = nn.Conv2d(self.channels[3], self.n_class, 1) def forward(self, x): # if grayscale input, convert to 3 channels if x.size()[1] == 1: x = self.conv_1cto3c(x) # transformer backbone as encoder x1, x2, x3, x4 = self.backbone(x) # decoder x1_o, x2_o, x3_o, x4_o = self.decoder(x4, [x3, x2, x1]) # prediction heads p1 = self.out_head1(x1_o) p2 = self.out_head2(x2_o) p3 = self.out_head3(x3_o) p4 = self.out_head4(x4_o) p1 = F.interpolate(p1, scale_factor=32, mode='bilinear') p2 = F.interpolate(p2, scale_factor=16, mode='bilinear') p3 = F.interpolate(p3, scale_factor=8, mode='bilinear') p4 = F.interpolate(p4, scale_factor=4, mode='bilinear') return p1, p2, p3, p4 class MERIT_GCASCADE(nn.Module): def __init__(self, n_class=1, img_size_s1=(256,256), img_size_s2=(224,224), k=11, padding=5, conv='mr', gcb_act='gelu', activation='relu', interpolation='bilinear', skip_aggregation='additive'): super(MERIT_GCASCADE, self).__init__() self.interpolation = interpolation self.img_size_s1 = img_size_s1 self.img_size_s2 = img_size_s2 self.skip_aggregation = skip_aggregation self.n_class = n_class # conv block to convert single channel to 3 channels self.conv_1cto3c = nn.Sequential( nn.Conv2d(1, 3, kernel_size=1), nn.BatchNorm2d(3), nn.ReLU(inplace=True) ) # backbone network initialization with pretrained weight
logger = logging.getLogger(__name__) def np2th(weights, conv=False): """Possibly convert HWIO to OIHW.""" if conv: weights = weights.transpose([3, 2, 0, 1]) return torch.from_numpy(weights) class PVT_CUP(nn.Module): def __init__(self, n_class=1): super(PVT_CUP, self).__init__() # conv block to convert single channel to 3 channels self.conv = nn.Sequential( nn.Conv2d(1, 3, kernel_size=1), nn.BatchNorm2d(3), nn.ReLU(inplace=True) ) # backbone network initialization with pretrained weight self.backbone = pvt_v2_b2() # [64, 128, 320, 512] path = './pretrained_pth/pvt/pvt_v2_b2.pth' save_model = torch.load(path) model_dict = self.backbone.state_dict() state_dict = {k: v for k, v in save_model.items() if k in model_dict.keys()} model_dict.update(state_dict) self.backbone.load_state_dict(model_dict) # decoder initialization self.decoder = CUP(channels=[512, 320, 128, 64]) print('Model %s created, param count: %d' % ('CUP decoder: ', sum([m.numel() for m in self.decoder.parameters()]))) # Prediction heads initialization self.out_head1 = nn.Conv2d(512, n_class, 1) self.out_head2 = nn.Conv2d(320, n_class, 1) self.out_head3 = nn.Conv2d(128, n_class, 1) self.out_head4 = nn.Conv2d(64, n_class, 1) def forward(self, x): # if grayscale input, convert to 3 channels if x.size()[1] == 1: x = self.conv(x) # transformer backbone as encoder x1, x2, x3, x4 = self.backbone(x) # decoder x1_o, x2_o, x3_o, x4_o = self.decoder(x4, [x3, x2, x1]) # prediction heads p1 = self.out_head1(x1_o) p2 = self.out_head2(x2_o) p3 = self.out_head3(x3_o) p4 = self.out_head4(x4_o) p1 = F.interpolate(p1, scale_factor=32, mode='bilinear') p2 = F.interpolate(p2, scale_factor=16, mode='bilinear') p3 = F.interpolate(p3, scale_factor=8, mode='bilinear') p4 = F.interpolate(p4, scale_factor=4, mode='bilinear') return p1, p2, p3, p4 class PVT_CASCADE(nn.Module): def __init__(self, n_class=1): super(PVT_CASCADE, self).__init__() # conv block to convert single channel to 3 channels self.conv = nn.Sequential( nn.Conv2d(1, 3, kernel_size=1), nn.BatchNorm2d(3), nn.ReLU(inplace=True) ) # backbone network initialization with pretrained weight self.backbone = pvt_v2_b2() # [64, 128, 320, 512] path = './pretrained_pth/pvt/pvt_v2_b2.pth' save_model = torch.load(path) model_dict = self.backbone.state_dict() state_dict = {k: v for k, v in save_model.items() if k in model_dict.keys()} model_dict.update(state_dict) self.backbone.load_state_dict(model_dict) # decoder initialization self.decoder = CASCADE(channels=[512, 320, 128, 64]) print('Model %s created, param count: %d' % ('CASCADE decoder: ', sum([m.numel() for m in self.decoder.parameters()]))) # Prediction heads initialization self.out_head1 = nn.Conv2d(512, n_class, 1) self.out_head2 = nn.Conv2d(320, n_class, 1) self.out_head3 = nn.Conv2d(128, n_class, 1) self.out_head4 = nn.Conv2d(64, n_class, 1) def forward(self, x): # if grayscale input, convert to 3 channels if x.size()[1] == 1: x = self.conv(x) # transformer backbone as encoder x1, x2, x3, x4 = self.backbone(x) # decoder x1_o, x2_o, x3_o, x4_o = self.decoder(x4, [x3, x2, x1]) # prediction heads p1 = self.out_head1(x1_o) p2 = self.out_head2(x2_o) p3 = self.out_head3(x3_o) p4 = self.out_head4(x4_o) p1 = F.interpolate(p1, scale_factor=32, mode='bilinear') p2 = F.interpolate(p2, scale_factor=16, mode='bilinear') p3 = F.interpolate(p3, scale_factor=8, mode='bilinear') p4 = F.interpolate(p4, scale_factor=4, mode='bilinear') return p1, p2, p3, p4 class PVT_CASCADE_Cat(nn.Module): def __init__(self, n_class=1): super(PVT_CASCADE_Cat, self).__init__() 
# conv block to convert single channel to 3 channels self.conv = nn.Sequential( nn.Conv2d(1, 3, kernel_size=1), nn.BatchNorm2d(3), nn.ReLU(inplace=True) ) # backbone network initialization with pretrained weight self.backbone = pvt_v2_b2() # [64, 128, 320, 512] path = './pretrained_pth/pvt/pvt_v2_b2.pth' save_model = torch.load(path) model_dict = self.backbone.state_dict() state_dict = {k: v for k, v in save_model.items() if k in model_dict.keys()} model_dict.update(state_dict) self.backbone.load_state_dict(model_dict) print('Model %s created, param count: %d' % ('PVT backbone: ', sum([m.numel() for m in self.backbone.parameters()]))) # decoder initialization self.decoder = CASCADE_Cat(channels=[512, 320, 128, 64]) print('Model %s created, param count: %d' % ('CASCADE_Cat decoder: ', sum([m.numel() for m in self.decoder.parameters()]))) # Prediction heads initialization self.out_head1 = nn.Conv2d(512, n_class, 1) self.out_head2 = nn.Conv2d(320, n_class, 1) self.out_head3 = nn.Conv2d(128, n_class, 1) self.out_head4 = nn.Conv2d(64, n_class, 1) def forward(self, x): # if grayscale input, convert to 3 channels if x.size()[1] == 1: x = self.conv(x) # transformer backbone as encoder x1, x2, x3, x4 = self.backbone(x) # decoder x1_o, x2_o, x3_o, x4_o = self.decoder(x4, [x3, x2, x1]) # prediction heads p1 = self.out_head1(x1_o) p2 = self.out_head2(x2_o) p3 = self.out_head3(x3_o) p4 = self.out_head4(x4_o) p1 = F.interpolate(p1, scale_factor=32, mode='bilinear') p2 = F.interpolate(p2, scale_factor=16, mode='bilinear') p3 = F.interpolate(p3, scale_factor=8, mode='bilinear') p4 = F.interpolate(p4, scale_factor=4, mode='bilinear') return p1, p2, p3, p4 class PVT_GCUP(nn.Module): def __init__(self, n_class=1, img_size=224, k=11, padding=5, conv='mr', gcb_act='gelu', activation='relu', skip_aggregation='additive'): super(PVT_GCUP, self).__init__() self.skip_aggregation = skip_aggregation self.n_class = n_class # conv block to convert single channel to 3 channels self.conv_1cto3c = nn.Sequential( nn.Conv2d(1, 3, kernel_size=1), nn.BatchNorm2d(3), nn.ReLU(inplace=True) ) # backbone network initialization with pretrained weight self.backbone = pvt_v2_b2() # [64, 128, 320, 512] path = './pretrained_pth/pvt/pvt_v2_b2.pth' save_model = torch.load(path) model_dict = self.backbone.state_dict() state_dict = {k: v for k, v in save_model.items() if k in model_dict.keys()} model_dict.update(state_dict) self.backbone.load_state_dict(model_dict) self.channels = [512, 320, 128, 64] # decoder initialization if self.skip_aggregation == 'additive': self.decoder = GCUP(channels=self.channels, img_size=img_size, k=k, padding=padding, conv=conv, gcb_act=gcb_act, activation=activation) elif self.skip_aggregation == 'concatenation': self.decoder = GCUP_Cat(channels=self.channels, img_size=img_size, k=k, padding=padding, conv=conv, gcb_act=gcb_act, activation=activation) self.channels = [self.channels[0], self.channels[1]*2, self.channels[2]*2, self.channels[3]*2] else: print('No implementation found for the skip_aggregation ' + self.skip_aggregation + '. 
Continuing with the default additive aggregation.') self.decoder = GCUP(channels=self.channels, img_size=img_size, k=k, padding=padding, conv=conv, gcb_act=gcb_act, activation=activation) print('Model %s created, param count: %d' % ('GCUP_decoder: ', sum([m.numel() for m in self.decoder.parameters()]))) # Prediction heads initialization self.out_head1 = nn.Conv2d(self.channels[0], self.n_class, 1) self.out_head2 = nn.Conv2d(self.channels[1], self.n_class, 1) self.out_head3 = nn.Conv2d(self.channels[2], self.n_class, 1) self.out_head4 = nn.Conv2d(self.channels[3], self.n_class, 1) def forward(self, x): # if grayscale input, convert to 3 channels if x.size()[1] == 1: x = self.conv_1cto3c(x) # transformer backbone as encoder x1, x2, x3, x4 = self.backbone(x) # decoder x1_o, x2_o, x3_o, x4_o = self.decoder(x4, [x3, x2, x1]) # prediction heads p1 = self.out_head1(x1_o) p2 = self.out_head2(x2_o) p3 = self.out_head3(x3_o) p4 = self.out_head4(x4_o) p1 = F.interpolate(p1, scale_factor=32, mode='bilinear') p2 = F.interpolate(p2, scale_factor=16, mode='bilinear') p3 = F.interpolate(p3, scale_factor=8, mode='bilinear') p4 = F.interpolate(p4, scale_factor=4, mode='bilinear') return p1, p2, p3, p4 class PVT_GCASCADE(nn.Module): def __init__(self, n_class=1, img_size=224, k=11, padding=5, conv='mr', gcb_act='gelu', activation='relu', skip_aggregation='additive'): super(PVT_GCASCADE, self).__init__() self.skip_aggregation = skip_aggregation self.n_class = n_class # conv block to convert single channel to 3 channels self.conv_1cto3c = nn.Sequential( nn.Conv2d(1, 3, kernel_size=1), nn.BatchNorm2d(3), nn.ReLU(inplace=True) ) # backbone network initialization with pretrained weight self.backbone = pvt_v2_b2() # [64, 128, 320, 512] path = './pretrained_pth/pvt/pvt_v2_b2.pth' save_model = torch.load(path) model_dict = self.backbone.state_dict() state_dict = {k: v for k, v in save_model.items() if k in model_dict.keys()} model_dict.update(state_dict) self.backbone.load_state_dict(model_dict) self.channels = [512, 320, 128, 64] # decoder initialization if self.skip_aggregation == 'additive': self.decoder = GCASCADE(channels=self.channels, img_size=img_size, k=k, padding=padding, conv=conv, gcb_act=gcb_act, activation=activation) elif self.skip_aggregation == 'concatenation': self.decoder = GCASCADE_Cat(channels=self.channels, img_size=img_size, k=k, padding=padding, conv=conv, gcb_act=gcb_act, activation=activation) self.channels = [self.channels[0], self.channels[1]*2, self.channels[2]*2, self.channels[3]*2] else: print('No implementation found for the skip_aggregation ' + self.skip_aggregation + '. 
Continuing with the default additive aggregation.') self.decoder = GCASCADE(channels=self.channels, img_size=img_size, k=k, padding=padding, conv=conv, gcb_act=gcb_act, activation=activation) print('Model %s created, param count: %d' % ('GCASCADE decoder: ', sum([m.numel() for m in self.decoder.parameters()]))) # Prediction heads initialization self.out_head1 = nn.Conv2d(self.channels[0], self.n_class, 1) self.out_head2 = nn.Conv2d(self.channels[1], self.n_class, 1) self.out_head3 = nn.Conv2d(self.channels[2], self.n_class, 1) self.out_head4 = nn.Conv2d(self.channels[3], self.n_class, 1) def forward(self, x): # if grayscale input, convert to 3 channels if x.size()[1] == 1: x = self.conv_1cto3c(x) # transformer backbone as encoder x1, x2, x3, x4 = self.backbone(x) # decoder x1_o, x2_o, x3_o, x4_o = self.decoder(x4, [x3, x2, x1]) # prediction heads p1 = self.out_head1(x1_o) p2 = self.out_head2(x2_o) p3 = self.out_head3(x3_o) p4 = self.out_head4(x4_o) p1 = F.interpolate(p1, scale_factor=32, mode='bilinear') p2 = F.interpolate(p2, scale_factor=16, mode='bilinear') p3 = F.interpolate(p3, scale_factor=8, mode='bilinear') p4 = F.interpolate(p4, scale_factor=4, mode='bilinear') return p1, p2, p3, p4 class MERIT_GCASCADE(nn.Module): def __init__(self, n_class=1, img_size_s1=(256,256), img_size_s2=(224,224), k=11, padding=5, conv='mr', gcb_act='gelu', activation='relu', interpolation='bilinear', skip_aggregation='additive'): super(MERIT_GCASCADE, self).__init__() self.interpolation = interpolation self.img_size_s1 = img_size_s1 self.img_size_s2 = img_size_s2 self.skip_aggregation = skip_aggregation self.n_class = n_class # conv block to convert single channel to 3 channels self.conv_1cto3c = nn.Sequential( nn.Conv2d(1, 3, kernel_size=1), nn.BatchNorm2d(3), nn.ReLU(inplace=True) ) # backbone network initialization with pretrained weight
self.backbone1 = maxxvit_rmlp_small_rw_256_4out() # [64, 128, 320, 512]
9
2023-10-24 17:49:10+00:00
16k
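Aside (illustrative sketch, not part of the dataset record above): the PVT_GCUP/PVT_GCASCADE wrappers in this record switch between additive and concatenation skip aggregation, and only the concatenation path rewrites the channel plan to [channels[0], channels[1]*2, channels[2]*2, channels[3]*2] before building the 1x1 prediction heads. A minimal PyTorch sketch of that shape effect; the 320-channel, 28x28 tensors are placeholder sizes loosely matching stage 3 of a 224x224 input, not values taken from the repository.

import torch

# Stage-3 example: encoder skip and upsampled decoder feature share 320 channels.
skip = torch.randn(1, 320, 28, 28)
decoded = torch.randn(1, 320, 28, 28)

additive = decoded + skip                          # additive aggregation: still 320 channels
concatenated = torch.cat((skip, decoded), dim=1)   # concatenation: channels double to 640

print(additive.shape)       # torch.Size([1, 320, 28, 28])
print(concatenated.shape)   # torch.Size([1, 640, 28, 28])

# This channel doubling is why the concatenation branch in the record's code
# widens self.channels[1:] before initializing out_head2..out_head4.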
boppreh/hello_tls
src/hello_tls/scan.py
[ { "identifier": "ClientHello", "path": "src/hello_tls/protocol.py", "snippet": "class ScanError(Exception):\nclass ServerAlertError(ScanError):\nclass BadServerResponse(ScanError):\nclass ServerHello:\nclass ClientHello:\n def __init__(self, level: AlertLevel, description: AlertDescription):\ndef _make_stream_parser(packets: Iterable[bytes]) -> Tuple[Callable[[int], bytes], Callable[[], int]]:\n def read_next(length: int) -> bytes:\ndef _bytes_to_int(b: bytes) -> int:\ndef parse_server_hello(packets: Iterable[bytes]) -> ServerHello:\ndef make_client_hello(client_hello: ClientHello) -> bytes:\n def prefix_length(block_name: str, width_bytes: int = 2) -> Iterator[None]:" }, { "identifier": "AlertDescription", "path": "src/hello_tls/names_and_numbers.py", "snippet": "class AlertDescription(Enum):\n \"\"\" Different alert messages that can be sent by the server. \"\"\"\n close_notify = b'\\x00'\n unexpected_message = b'\\x0a'\n bad_record_mac = b'\\x14'\n record_overflow = b'\\x16'\n handshake_failure = b'\\x28'\n bad_certificate = b'\\x2a'\n unsupported_certificate = b'\\x2b'\n certificate_revoked = b'\\x2c'\n certificate_expired = b'\\x2d'\n certificate_unknown = b'\\x2e'\n illegal_parameter = b'\\x2f'\n unknown_ca = b'\\x30'\n access_denied = b'\\x31'\n decode_error = b'\\x32'\n decrypt_error = b'\\x33'\n protocol_version = b'\\x46'\n insufficient_security = b'\\x47'\n internal_error = b'\\x50'\n inappropriate_fallback = b'\\x56'\n user_canceled = b'\\x5a'\n missing_extension = b'\\x6d'\n unsupported_extension = b'\\x6e'\n unrecognized_name = b'\\x70'\n bad_certificate_status_response = b'\\x71'\n unknown_psk_identity = b'\\x73'\n certificate_required = b'\\x74'\n no_application_protocol = b'\\x78'" }, { "identifier": "CipherSuite", "path": "src/hello_tls/names_and_numbers.py", "snippet": "class CipherSuite(Enum):\n def __repr__(self):\n return self.name\n def __new__(cls, value, *rest, **kwds):\n obj = object.__new__(cls)\n obj._value_ = value\n return obj\n # Annotate each cipher suite with the protocols it's supported at.\n # Default to all but TLS 1.3, because that's the most common.\n def __init__(self, _: bytes, protocols: Sequence[Protocol] = (Protocol.SSLv3, Protocol.TLS1_0, Protocol.TLS1_1, Protocol.TLS1_2)):\n self.protocols = protocols\n\n # Pseudo cipher suite, not actually picked.\n #TLS_EMPTY_RENEGOTIATION_INFO_SCSV = b\"\\x00\\xff\"\n\n # TLS 1.3 cipher suites.\n TLS_AES_128_GCM_SHA256 = b\"\\x13\\x01\", (Protocol.TLS1_3,)\n TLS_AES_256_GCM_SHA384 = b\"\\x13\\x02\", (Protocol.TLS1_3,)\n TLS_CHACHA20_POLY1305_SHA256 = b\"\\x13\\x03\", (Protocol.TLS1_3,)\n TLS_AES_128_CCM_SHA256 = b\"\\x13\\x04\", (Protocol.TLS1_3,)\n TLS_AES_128_CCM_8_SHA256 = b\"\\x13\\x05\", (Protocol.TLS1_3,)\n\n # Cipher suite that had its number reassigned.\n OLD_TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 = b'\\xcc\\x13'\n \n # Cipher suites adapted from IANA assignments:\n # https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#tls-parameters-4\n TLS_AEGIS_128L_SHA256 = b'\\x13\\x07' # [draft-irtf-cfrg-aegis-aead-00]\n TLS_AEGIS_256_SHA384 = b'\\x13\\x06' # [draft-irtf-cfrg-aegis-aead-00]\n TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA = b'\\x00\\x19' # [RFC4346]\n TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 = b'\\x00\\x17' # [RFC4346][RFC6347]\n TLS_DH_anon_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x1B' # [RFC5246]\n TLS_DH_anon_WITH_AES_128_CBC_SHA = b'\\x00\\x34' # [RFC5246]\n TLS_DH_anon_WITH_AES_128_CBC_SHA256 = b'\\x00\\x6C' # [RFC5246]\n TLS_DH_anon_WITH_AES_128_GCM_SHA256 = b'\\x00\\xA6' # [RFC5288]\n 
TLS_DH_anon_WITH_AES_256_CBC_SHA = b'\\x00\\x3A' # [RFC5246]\n TLS_DH_anon_WITH_AES_256_CBC_SHA256 = b'\\x00\\x6D' # [RFC5246]\n TLS_DH_anon_WITH_AES_256_GCM_SHA384 = b'\\x00\\xA7' # [RFC5288]\n TLS_DH_anon_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x46' # [RFC6209]\n TLS_DH_anon_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x5A' # [RFC6209]\n TLS_DH_anon_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x47' # [RFC6209]\n TLS_DH_anon_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x5B' # [RFC6209]\n TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA = b'\\x00\\x46' # [RFC5932]\n TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256 = b'\\x00\\xBF' # [RFC5932]\n TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x84' # [RFC6367]\n TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA = b'\\x00\\x89' # [RFC5932]\n TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256 = b'\\x00\\xC5' # [RFC5932]\n TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x85' # [RFC6367]\n TLS_DH_anon_WITH_DES_CBC_SHA = b'\\x00\\x1A' # [RFC8996]\n TLS_DH_anon_WITH_RC4_128_MD5 = b'\\x00\\x18' # [RFC5246][RFC6347]\n TLS_DH_anon_WITH_SEED_CBC_SHA = b'\\x00\\x9B' # [RFC4162]\n TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA = b'\\x00\\x0B' # [RFC4346]\n TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x0D' # [RFC5246]\n TLS_DH_DSS_WITH_AES_128_CBC_SHA = b'\\x00\\x30' # [RFC5246]\n TLS_DH_DSS_WITH_AES_128_CBC_SHA256 = b'\\x00\\x3E' # [RFC5246]\n TLS_DH_DSS_WITH_AES_128_GCM_SHA256 = b'\\x00\\xA4' # [RFC5288]\n TLS_DH_DSS_WITH_AES_256_CBC_SHA = b'\\x00\\x36' # [RFC5246]\n TLS_DH_DSS_WITH_AES_256_CBC_SHA256 = b'\\x00\\x68' # [RFC5246]\n TLS_DH_DSS_WITH_AES_256_GCM_SHA384 = b'\\x00\\xA5' # [RFC5288]\n TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x3E' # [RFC6209]\n TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x58' # [RFC6209]\n TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x3F' # [RFC6209]\n TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x59' # [RFC6209]\n TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA = b'\\x00\\x42' # [RFC5932]\n TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 = b'\\x00\\xBB' # [RFC5932]\n TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x82' # [RFC6367]\n TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA = b'\\x00\\x85' # [RFC5932]\n TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 = b'\\x00\\xC1' # [RFC5932]\n TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x83' # [RFC6367]\n TLS_DH_DSS_WITH_DES_CBC_SHA = b'\\x00\\x0C' # [RFC8996]\n TLS_DH_DSS_WITH_SEED_CBC_SHA = b'\\x00\\x97' # [RFC4162]\n TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA = b'\\x00\\x0E' # [RFC4346]\n TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x10' # [RFC5246]\n TLS_DH_RSA_WITH_AES_128_CBC_SHA = b'\\x00\\x31' # [RFC5246]\n TLS_DH_RSA_WITH_AES_128_CBC_SHA256 = b'\\x00\\x3F' # [RFC5246]\n TLS_DH_RSA_WITH_AES_128_GCM_SHA256 = b'\\x00\\xA0' # [RFC5288]\n TLS_DH_RSA_WITH_AES_256_CBC_SHA = b'\\x00\\x37' # [RFC5246]\n TLS_DH_RSA_WITH_AES_256_CBC_SHA256 = b'\\x00\\x69' # [RFC5246]\n TLS_DH_RSA_WITH_AES_256_GCM_SHA384 = b'\\x00\\xA1' # [RFC5288]\n TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x40' # [RFC6209]\n TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x54' # [RFC6209]\n TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x41' # [RFC6209]\n TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x55' # [RFC6209]\n TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA = b'\\x00\\x43' # [RFC5932]\n TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 = b'\\x00\\xBC' # [RFC5932]\n TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x7E' # [RFC6367]\n TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA = b'\\x00\\x86' # [RFC5932]\n TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 = b'\\x00\\xC2' # [RFC5932]\n TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384 = 
b'\\xC0\\x7F' # [RFC6367]\n TLS_DH_RSA_WITH_DES_CBC_SHA = b'\\x00\\x0F' # [RFC8996]\n TLS_DH_RSA_WITH_SEED_CBC_SHA = b'\\x00\\x98' # [RFC4162]\n TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA = b'\\x00\\x11' # [RFC4346]\n TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x13' # [RFC5246]\n TLS_DHE_DSS_WITH_AES_128_CBC_SHA = b'\\x00\\x32' # [RFC5246]\n TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 = b'\\x00\\x40' # [RFC5246]\n TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 = b'\\x00\\xA2' # [RFC5288]\n TLS_DHE_DSS_WITH_AES_256_CBC_SHA = b'\\x00\\x38' # [RFC5246]\n TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 = b'\\x00\\x6A' # [RFC5246]\n TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 = b'\\x00\\xA3' # [RFC5288]\n TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x42' # [RFC6209]\n TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x56' # [RFC6209]\n TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x43' # [RFC6209]\n TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x57' # [RFC6209]\n TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA = b'\\x00\\x44' # [RFC5932]\n TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 = b'\\x00\\xBD' # [RFC5932]\n TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x80' # [RFC6367]\n TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA = b'\\x00\\x87' # [RFC5932]\n TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 = b'\\x00\\xC3' # [RFC5932]\n TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x81' # [RFC6367]\n TLS_DHE_DSS_WITH_DES_CBC_SHA = b'\\x00\\x12' # [RFC8996]\n TLS_DHE_DSS_WITH_SEED_CBC_SHA = b'\\x00\\x99' # [RFC4162]\n TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x8F' # [RFC4279]\n TLS_DHE_PSK_WITH_AES_128_CBC_SHA = b'\\x00\\x90' # [RFC4279]\n TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 = b'\\x00\\xB2' # [RFC5487]\n TLS_DHE_PSK_WITH_AES_128_CCM = b'\\xC0\\xA6' # [RFC6655]\n TLS_DHE_PSK_WITH_AES_128_GCM_SHA256 = b'\\x00\\xAA' # [RFC5487]\n TLS_DHE_PSK_WITH_AES_256_CBC_SHA = b'\\x00\\x91' # [RFC4279]\n TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 = b'\\x00\\xB3' # [RFC5487]\n TLS_DHE_PSK_WITH_AES_256_CCM = b'\\xC0\\xA7' # [RFC6655]\n TLS_DHE_PSK_WITH_AES_256_GCM_SHA384 = b'\\x00\\xAB' # [RFC5487]\n TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x66' # [RFC6209]\n TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x6C' # [RFC6209]\n TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x67' # [RFC6209]\n TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x6D' # [RFC6209]\n TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 = b'\\xC0\\x96' # [RFC6367]\n TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x90' # [RFC6367]\n TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 = b'\\xC0\\x97' # [RFC6367]\n TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x91' # [RFC6367]\n TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256 = b'\\xCC\\xAD' # [RFC7905]\n TLS_DHE_PSK_WITH_NULL_SHA = b'\\x00\\x2D' # [RFC4785]\n TLS_DHE_PSK_WITH_NULL_SHA256 = b'\\x00\\xB4' # [RFC5487]\n TLS_DHE_PSK_WITH_NULL_SHA384 = b'\\x00\\xB5' # [RFC5487]\n TLS_DHE_PSK_WITH_RC4_128_SHA = b'\\x00\\x8E' # [RFC4279][RFC6347]\n TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA = b'\\x00\\x14' # [RFC4346]\n TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x16' # [RFC5246]\n TLS_DHE_RSA_WITH_AES_128_CBC_SHA = b'\\x00\\x33' # [RFC5246]\n TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 = b'\\x00\\x67' # [RFC5246]\n TLS_DHE_RSA_WITH_AES_128_CCM = b'\\xC0\\x9E' # [RFC6655]\n TLS_DHE_RSA_WITH_AES_128_CCM_8 = b'\\xC0\\xA2' # [RFC6655]\n TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 = b'\\x00\\x9E' # [RFC5288]\n TLS_DHE_RSA_WITH_AES_256_CBC_SHA = b'\\x00\\x39' # [RFC5246]\n TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 = b'\\x00\\x6B' # [RFC5246]\n TLS_DHE_RSA_WITH_AES_256_CCM = b'\\xC0\\x9F' # [RFC6655]\n 
TLS_DHE_RSA_WITH_AES_256_CCM_8 = b'\\xC0\\xA3' # [RFC6655]\n TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 = b'\\x00\\x9F' # [RFC5288]\n TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x44' # [RFC6209]\n TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x52' # [RFC6209]\n TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x45' # [RFC6209]\n TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x53' # [RFC6209]\n TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA = b'\\x00\\x45' # [RFC5932]\n TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 = b'\\x00\\xBE' # [RFC5932]\n TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x7C' # [RFC6367]\n TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA = b'\\x00\\x88' # [RFC5932]\n TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 = b'\\x00\\xC4' # [RFC5932]\n TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x7D' # [RFC6367]\n TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256 = b'\\xCC\\xAA' # [RFC7905]\n TLS_DHE_RSA_WITH_DES_CBC_SHA = b'\\x00\\x15' # [RFC8996]\n TLS_DHE_RSA_WITH_SEED_CBC_SHA = b'\\x00\\x9A' # [RFC4162]\n TLS_ECCPWD_WITH_AES_128_CCM_SHA256 = b'\\xC0\\xB2' # [RFC8492]\n TLS_ECCPWD_WITH_AES_128_GCM_SHA256 = b'\\xC0\\xB0' # [RFC8492]\n TLS_ECCPWD_WITH_AES_256_CCM_SHA384 = b'\\xC0\\xB3' # [RFC8492]\n TLS_ECCPWD_WITH_AES_256_GCM_SHA384 = b'\\xC0\\xB1' # [RFC8492]\n TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x17' # [RFC8422]\n TLS_ECDH_anon_WITH_AES_128_CBC_SHA = b'\\xC0\\x18' # [RFC8422]\n TLS_ECDH_anon_WITH_AES_256_CBC_SHA = b'\\xC0\\x19' # [RFC8422]\n TLS_ECDH_anon_WITH_NULL_SHA = b'\\xC0\\x15' # [RFC8422]\n TLS_ECDH_anon_WITH_RC4_128_SHA = b'\\xC0\\x16' # [RFC8422][RFC6347]\n TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x03' # [RFC8422]\n TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA = b'\\xC0\\x04' # [RFC8422]\n TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 = b'\\xC0\\x25' # [RFC5289]\n TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 = b'\\xC0\\x2D' # [RFC5289]\n TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA = b'\\xC0\\x05' # [RFC8422]\n TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 = b'\\xC0\\x26' # [RFC5289]\n TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 = b'\\xC0\\x2E' # [RFC5289]\n TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x4A' # [RFC6209]\n TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x5E' # [RFC6209]\n TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x4B' # [RFC6209]\n TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x5F' # [RFC6209]\n TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 = b'\\xC0\\x74' # [RFC6367]\n TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x88' # [RFC6367]\n TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 = b'\\xC0\\x75' # [RFC6367]\n TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x89' # [RFC6367]\n TLS_ECDH_ECDSA_WITH_NULL_SHA = b'\\xC0\\x01' # [RFC8422]\n TLS_ECDH_ECDSA_WITH_RC4_128_SHA = b'\\xC0\\x02' # [RFC8422][RFC6347]\n TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x0D' # [RFC8422]\n TLS_ECDH_RSA_WITH_AES_128_CBC_SHA = b'\\xC0\\x0E' # [RFC8422]\n TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 = b'\\xC0\\x29' # [RFC5289]\n TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 = b'\\xC0\\x31' # [RFC5289]\n TLS_ECDH_RSA_WITH_AES_256_CBC_SHA = b'\\xC0\\x0F' # [RFC8422]\n TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 = b'\\xC0\\x2A' # [RFC5289]\n TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 = b'\\xC0\\x32' # [RFC5289]\n TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x4E' # [RFC6209]\n TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x62' # [RFC6209]\n TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x4F' # [RFC6209]\n TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x63' # [RFC6209]\n TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 = 
b'\\xC0\\x78' # [RFC6367]\n TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x8C' # [RFC6367]\n TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 = b'\\xC0\\x79' # [RFC6367]\n TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x8D' # [RFC6367]\n TLS_ECDH_RSA_WITH_NULL_SHA = b'\\xC0\\x0B' # [RFC8422]\n TLS_ECDH_RSA_WITH_RC4_128_SHA = b'\\xC0\\x0C' # [RFC8422][RFC6347]\n TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x08' # [RFC8422]\n TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA = b'\\xC0\\x09' # [RFC8422]\n TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 = b'\\xC0\\x23' # [RFC5289]\n TLS_ECDHE_ECDSA_WITH_AES_128_CCM = b'\\xC0\\xAC' # [RFC7251]\n TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 = b'\\xC0\\xAE' # [RFC7251]\n TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 = b'\\xC0\\x2B' # [RFC5289]\n TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA = b'\\xC0\\x0A' # [RFC8422]\n TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 = b'\\xC0\\x24' # [RFC5289]\n TLS_ECDHE_ECDSA_WITH_AES_256_CCM = b'\\xC0\\xAD' # [RFC7251]\n TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8 = b'\\xC0\\xAF' # [RFC7251]\n TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 = b'\\xC0\\x2C' # [RFC5289]\n TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x48' # [RFC6209]\n TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x5C' # [RFC6209]\n TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x49' # [RFC6209]\n TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x5D' # [RFC6209]\n TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 = b'\\xC0\\x72' # [RFC6367]\n TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x86' # [RFC6367]\n TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 = b'\\xC0\\x73' # [RFC6367]\n TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x87' # [RFC6367]\n TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 = b'\\xCC\\xA9' # [RFC7905]\n TLS_ECDHE_ECDSA_WITH_NULL_SHA = b'\\xC0\\x06' # [RFC8422]\n TLS_ECDHE_ECDSA_WITH_RC4_128_SHA = b'\\xC0\\x07' # [RFC8422][RFC6347]\n TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x34' # [RFC5489]\n TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA = b'\\xC0\\x35' # [RFC5489]\n TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 = b'\\xC0\\x37' # [RFC5489]\n TLS_ECDHE_PSK_WITH_AES_128_CCM_8_SHA256 = b'\\xD0\\x03' # [RFC8442]\n TLS_ECDHE_PSK_WITH_AES_128_CCM_SHA256 = b'\\xD0\\x05' # [RFC8442]\n TLS_ECDHE_PSK_WITH_AES_128_GCM_SHA256 = b'\\xD0\\x01' # [RFC8442]\n TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA = b'\\xC0\\x36' # [RFC5489]\n TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 = b'\\xC0\\x38' # [RFC5489]\n TLS_ECDHE_PSK_WITH_AES_256_GCM_SHA384 = b'\\xD0\\x02' # [RFC8442]\n TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x70' # [RFC6209]\n TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x71' # [RFC6209]\n TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 = b'\\xC0\\x9A' # [RFC6367]\n TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 = b'\\xC0\\x9B' # [RFC6367]\n TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256 = b'\\xCC\\xAC' # [RFC7905]\n TLS_ECDHE_PSK_WITH_NULL_SHA = b'\\xC0\\x39' # [RFC5489]\n TLS_ECDHE_PSK_WITH_NULL_SHA256 = b'\\xC0\\x3A' # [RFC5489]\n TLS_ECDHE_PSK_WITH_NULL_SHA384 = b'\\xC0\\x3B' # [RFC5489]\n TLS_ECDHE_PSK_WITH_RC4_128_SHA = b'\\xC0\\x33' # [RFC5489][RFC6347]\n TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x12' # [RFC8422]\n TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA = b'\\xC0\\x13' # [RFC8422]\n TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 = b'\\xC0\\x27' # [RFC5289]\n TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 = b'\\xC0\\x2F' # [RFC5289]\n TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA = b'\\xC0\\x14' # [RFC8422]\n TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 = b'\\xC0\\x28' # [RFC5289]\n TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 = 
b'\\xC0\\x30' # [RFC5289]\n TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x4C' # [RFC6209]\n TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x60' # [RFC6209]\n TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x4D' # [RFC6209]\n TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x61' # [RFC6209]\n TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 = b'\\xC0\\x76' # [RFC6367]\n TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x8A' # [RFC6367]\n TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 = b'\\xC0\\x77' # [RFC6367]\n TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x8B' # [RFC6367]\n TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 = b'\\xCC\\xA8' # [RFC7905]\n TLS_ECDHE_RSA_WITH_NULL_SHA = b'\\xC0\\x10' # [RFC8422]\n TLS_ECDHE_RSA_WITH_RC4_128_SHA = b'\\xC0\\x11' # [RFC8422][RFC6347]\n TLS_GOSTR341112_256_WITH_28147_CNT_IMIT = b'\\xC1\\x02' # [RFC9189]\n TLS_GOSTR341112_256_WITH_KUZNYECHIK_CTR_OMAC = b'\\xC1\\x00' # [RFC9189]\n TLS_GOSTR341112_256_WITH_KUZNYECHIK_MGM_L = b'\\xC1\\x03' # [RFC9367]\n TLS_GOSTR341112_256_WITH_KUZNYECHIK_MGM_S = b'\\xC1\\x05' # [RFC9367]\n TLS_GOSTR341112_256_WITH_MAGMA_CTR_OMAC = b'\\xC1\\x01' # [RFC9189]\n TLS_GOSTR341112_256_WITH_MAGMA_MGM_L = b'\\xC1\\x04' # [RFC9367]\n TLS_GOSTR341112_256_WITH_MAGMA_MGM_S = b'\\xC1\\x06' # [RFC9367]\n TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 = b'\\x00\\x29' # [RFC2712]\n TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA = b'\\x00\\x26' # [RFC2712]\n TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5 = b'\\x00\\x2A' # [RFC2712]\n TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA = b'\\x00\\x27' # [RFC2712]\n TLS_KRB5_EXPORT_WITH_RC4_40_MD5 = b'\\x00\\x2B' # [RFC2712][RFC6347]\n TLS_KRB5_EXPORT_WITH_RC4_40_SHA = b'\\x00\\x28' # [RFC2712][RFC6347]\n TLS_KRB5_WITH_3DES_EDE_CBC_MD5 = b'\\x00\\x23' # [RFC2712]\n TLS_KRB5_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x1F' # [RFC2712]\n TLS_KRB5_WITH_DES_CBC_MD5 = b'\\x00\\x22' # [RFC2712]\n TLS_KRB5_WITH_DES_CBC_SHA = b'\\x00\\x1E' # [RFC2712]\n TLS_KRB5_WITH_IDEA_CBC_MD5 = b'\\x00\\x25' # [RFC2712]\n TLS_KRB5_WITH_IDEA_CBC_SHA = b'\\x00\\x21' # [RFC2712]\n TLS_KRB5_WITH_RC4_128_MD5 = b'\\x00\\x24' # [RFC2712][RFC6347]\n TLS_KRB5_WITH_RC4_128_SHA = b'\\x00\\x20' # [RFC2712][RFC6347]\n TLS_NULL_WITH_NULL_NULL = b'\\x00\\x00' # [RFC5246]\n TLS_PSK_DHE_WITH_AES_128_CCM_8 = b'\\xC0\\xAA' # [RFC6655]\n TLS_PSK_DHE_WITH_AES_256_CCM_8 = b'\\xC0\\xAB' # [RFC6655]\n TLS_PSK_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x8B' # [RFC4279]\n TLS_PSK_WITH_AES_128_CBC_SHA = b'\\x00\\x8C' # [RFC4279]\n TLS_PSK_WITH_AES_128_CBC_SHA256 = b'\\x00\\xAE' # [RFC5487]\n TLS_PSK_WITH_AES_128_CCM = b'\\xC0\\xA4' # [RFC6655]\n TLS_PSK_WITH_AES_128_CCM_8 = b'\\xC0\\xA8' # [RFC6655]\n TLS_PSK_WITH_AES_128_GCM_SHA256 = b'\\x00\\xA8' # [RFC5487]\n TLS_PSK_WITH_AES_256_CBC_SHA = b'\\x00\\x8D' # [RFC4279]\n TLS_PSK_WITH_AES_256_CBC_SHA384 = b'\\x00\\xAF' # [RFC5487]\n TLS_PSK_WITH_AES_256_CCM = b'\\xC0\\xA5' # [RFC6655]\n TLS_PSK_WITH_AES_256_CCM_8 = b'\\xC0\\xA9' # [RFC6655]\n TLS_PSK_WITH_AES_256_GCM_SHA384 = b'\\x00\\xA9' # [RFC5487]\n TLS_PSK_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x64' # [RFC6209]\n TLS_PSK_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x6A' # [RFC6209]\n TLS_PSK_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x65' # [RFC6209]\n TLS_PSK_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x6B' # [RFC6209]\n TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256 = b'\\xC0\\x94' # [RFC6367]\n TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x8E' # [RFC6367]\n TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384 = b'\\xC0\\x95' # [RFC6367]\n TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x8F' # [RFC6367]\n 
TLS_PSK_WITH_CHACHA20_POLY1305_SHA256 = b'\\xCC\\xAB' # [RFC7905]\n TLS_PSK_WITH_NULL_SHA = b'\\x00\\x2C' # [RFC4785]\n TLS_PSK_WITH_NULL_SHA256 = b'\\x00\\xB0' # [RFC5487]\n TLS_PSK_WITH_NULL_SHA384 = b'\\x00\\xB1' # [RFC5487]\n TLS_PSK_WITH_RC4_128_SHA = b'\\x00\\x8A' # [RFC4279][RFC6347]\n TLS_RSA_EXPORT_WITH_DES40_CBC_SHA = b'\\x00\\x08' # [RFC4346]\n TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 = b'\\x00\\x06' # [RFC4346]\n TLS_RSA_EXPORT_WITH_RC4_40_MD5 = b'\\x00\\x03' # [RFC4346][RFC6347]\n TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x93' # [RFC4279]\n TLS_RSA_PSK_WITH_AES_128_CBC_SHA = b'\\x00\\x94' # [RFC4279]\n TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 = b'\\x00\\xB6' # [RFC5487]\n TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 = b'\\x00\\xAC' # [RFC5487]\n TLS_RSA_PSK_WITH_AES_256_CBC_SHA = b'\\x00\\x95' # [RFC4279]\n TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 = b'\\x00\\xB7' # [RFC5487]\n TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 = b'\\x00\\xAD' # [RFC5487]\n TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x68' # [RFC6209]\n TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x6E' # [RFC6209]\n TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x69' # [RFC6209]\n TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x6F' # [RFC6209]\n TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 = b'\\xC0\\x98' # [RFC6367]\n TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x92' # [RFC6367]\n TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 = b'\\xC0\\x99' # [RFC6367]\n TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x93' # [RFC6367]\n TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256 = b'\\xCC\\xAE' # [RFC7905]\n TLS_RSA_PSK_WITH_NULL_SHA = b'\\x00\\x2E' # [RFC4785]\n TLS_RSA_PSK_WITH_NULL_SHA256 = b'\\x00\\xB8' # [RFC5487]\n TLS_RSA_PSK_WITH_NULL_SHA384 = b'\\x00\\xB9' # [RFC5487]\n TLS_RSA_PSK_WITH_RC4_128_SHA = b'\\x00\\x92' # [RFC4279][RFC6347]\n TLS_RSA_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x0A' # [RFC5246]\n TLS_RSA_WITH_AES_128_CBC_SHA = b'\\x00\\x2F' # [RFC5246]\n TLS_RSA_WITH_AES_128_CBC_SHA256 = b'\\x00\\x3C' # [RFC5246]\n TLS_RSA_WITH_AES_128_CCM = b'\\xC0\\x9C' # [RFC6655]\n TLS_RSA_WITH_AES_128_CCM_8 = b'\\xC0\\xA0' # [RFC6655]\n TLS_RSA_WITH_AES_128_GCM_SHA256 = b'\\x00\\x9C' # [RFC5288]\n TLS_RSA_WITH_AES_256_CBC_SHA = b'\\x00\\x35' # [RFC5246]\n TLS_RSA_WITH_AES_256_CBC_SHA256 = b'\\x00\\x3D' # [RFC5246]\n TLS_RSA_WITH_AES_256_CCM = b'\\xC0\\x9D' # [RFC6655]\n TLS_RSA_WITH_AES_256_CCM_8 = b'\\xC0\\xA1' # [RFC6655]\n TLS_RSA_WITH_AES_256_GCM_SHA384 = b'\\x00\\x9D' # [RFC5288]\n TLS_RSA_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x3C' # [RFC6209]\n TLS_RSA_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x50' # [RFC6209]\n TLS_RSA_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x3D' # [RFC6209]\n TLS_RSA_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x51' # [RFC6209]\n TLS_RSA_WITH_CAMELLIA_128_CBC_SHA = b'\\x00\\x41' # [RFC5932]\n TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 = b'\\x00\\xBA' # [RFC5932]\n TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x7A' # [RFC6367]\n TLS_RSA_WITH_CAMELLIA_256_CBC_SHA = b'\\x00\\x84' # [RFC5932]\n TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 = b'\\x00\\xC0' # [RFC5932]\n TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x7B' # [RFC6367]\n TLS_RSA_WITH_DES_CBC_SHA = b'\\x00\\x09' # [RFC8996]\n TLS_RSA_WITH_IDEA_CBC_SHA = b'\\x00\\x07' # [RFC8996]\n TLS_RSA_WITH_NULL_MD5 = b'\\x00\\x01' # [RFC5246]\n TLS_RSA_WITH_NULL_SHA = b'\\x00\\x02' # [RFC5246]\n TLS_RSA_WITH_NULL_SHA256 = b'\\x00\\x3B' # [RFC5246]\n TLS_RSA_WITH_RC4_128_MD5 = b'\\x00\\x04' # [RFC5246][RFC6347]\n TLS_RSA_WITH_RC4_128_SHA = b'\\x00\\x05' # [RFC5246][RFC6347]\n TLS_RSA_WITH_SEED_CBC_SHA = b'\\x00\\x96' # 
[RFC4162]\n TLS_SHA256_SHA256 = b'\\xC0\\xB4' # [RFC9150]\n TLS_SHA384_SHA384 = b'\\xC0\\xB5' # [RFC9150]\n TLS_SM4_CCM_SM3 = b'\\x00\\xC7' # [RFC8998]\n TLS_SM4_GCM_SM3 = b'\\x00\\xC6' # [RFC8998]\n TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x1C' # [RFC5054]\n TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA = b'\\xC0\\x1F' # [RFC5054]\n TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA = b'\\xC0\\x22' # [RFC5054]\n TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x1B' # [RFC5054]\n TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA = b'\\xC0\\x1E' # [RFC5054]\n TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA = b'\\xC0\\x21' # [RFC5054]\n TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x1A' # [RFC5054]\n TLS_SRP_SHA_WITH_AES_128_CBC_SHA = b'\\xC0\\x1D' # [RFC5054]\n TLS_SRP_SHA_WITH_AES_256_CBC_SHA = b'\\xC0\\x20' # [RFC5054]" }, { "identifier": "Group", "path": "src/hello_tls/names_and_numbers.py", "snippet": "class Group(Enum):\n def __new__(cls, value, *rest, **kwds):\n obj = object.__new__(cls)\n obj._value_ = value\n return obj\n # Annotate each group with whether it's a PQ group.\n def __init__(self, _: bytes, is_pq: bool = False):\n self.is_pq = is_pq\n def __repr__(self):\n return self.name\n \n sect163k1 = b'\\x00\\x01'\n sect163r1 = b'\\x00\\x02'\n sect163r2 = b'\\x00\\x03'\n sect193r1 = b'\\x00\\x04'\n sect193r2 = b'\\x00\\x05'\n sect233k1 = b'\\x00\\x06'\n sect233r1 = b'\\x00\\x07'\n sect239k1 = b'\\x00\\x08'\n sect283k1 = b'\\x00\\x09'\n sect283r1 = b'\\x00\\x0a'\n sect409k1 = b'\\x00\\x0b'\n sect409r1 = b'\\x00\\x0c'\n sect571k1 = b'\\x00\\x0d'\n sect571r1 = b'\\x00\\x0e'\n secp160k1 = b'\\x00\\x0f'\n secp160r1 = b'\\x00\\x10'\n secp160r2 = b'\\x00\\x11'\n secp192k1 = b'\\x00\\x12'\n secp192r1 = b'\\x00\\x13'\n secp224k1 = b'\\x00\\x14'\n secp224r1 = b'\\x00\\x15'\n secp256k1 = b'\\x00\\x16'\n secp256r1 = b'\\x00\\x17'\n secp384r1 = b'\\x00\\x18'\n secp521r1 = b'\\x00\\x19'\n brainpoolP256r1 = b'\\x00\\x1a'\n brainpoolP384r1 = b'\\x00\\x1b'\n brainpoolP512r1 = b'\\x00\\x1c'\n x25519 = b'\\x00\\x1d'\n x448 = b'\\x00\\x1e'\n brainpoolP256r1tls13 = b'\\x00\\x1f'\n brainpoolP384r1tls13 = b'\\x00\\x20'\n brainpoolP512r1tls13 = b'\\x00\\x21'\n GC256A = b'\\x00\\x22'\n GC256B = b'\\x00\\x23'\n GC256C = b'\\x00\\x24'\n GC256D = b'\\x00\\x25'\n GC512A = b'\\x00\\x26'\n GC512B = b'\\x00\\x27'\n GC512C = b'\\x00\\x28'\n curveSM2 = b'\\x00\\x29'\n ffdhe2048 = b'\\x01\\x00'\n ffdhe3072 = b'\\x01\\x01'\n ffdhe4096 = b'\\x01\\x02'\n ffdhe6144 = b'\\x01\\x03'\n ffdhe8192 = b'\\x01\\x04'\n arbitrary_explicit_prime_curves = b'\\xff\\x01'\n arbitrary_explicit_char2_curves = b'\\xff\\x02'\n\n # Somewhat common post-quantum groups, not yet standardized:\n X25519Kyber768Draft00 = b'\\x63\\x99', True\n X25519Kyber768Draft00_obsolete = b'\\xfe\\x31', True\n X25519Kyber512Draft00 = b'\\xfe\\x30', True\n SecP256r1Kyber768Draft00 = b'\\x63\\x9a', True\n\n # Long list of unusual post-quantum groups from liboqs:\n # https://github.com/open-quantum-safe/oqs-provider/blob/main/ALGORITHMS.md?plain=1#L13\n frodo640aes = b'\\x02\\x00', True\n p256_frodo640aes = b'\\x2F\\x00', True\n x25519_frodo640aes = b'\\x2F\\x80', True\n frodo640shake = b'\\x02\\x01', True\n p256_frodo640shake = b'\\x2F\\x01', True\n x25519_frodo640shake = b'\\x2F\\x81', True\n frodo976aes = b'\\x02\\x02', True\n p384_frodo976aes = b'\\x2F\\x02', True\n x448_frodo976aes = b'\\x2F\\x82', True\n frodo976shake = b'\\x02\\x03', True\n p384_frodo976shake = b'\\x2F\\x03', True\n x448_frodo976shake = b'\\x2F\\x83', True\n frodo1344aes = b'\\x02\\x04', True\n p521_frodo1344aes = 
b'\\x2F\\x04', True\n frodo1344shake = b'\\x02\\x05', True\n p521_frodo1344shake = b'\\x2F\\x05', True\n kyber512 = b'\\x02\\x3A', True\n p256_kyber512 = b'\\x2F\\x3A', True\n x25519_kyber512 = b'\\x2F\\x39', True\n kyber768 = b'\\x02\\x3C', True\n p384_kyber768 = b'\\x2F\\x3C', True\n x448_kyber768 = b'\\x2F\\x90', True\n kyber1024 = b'\\x02\\x3D', True\n p521_kyber1024 = b'\\x2F\\x3D', True\n bikel1 = b'\\x02\\x41', True\n p256_bikel1 = b'\\x2F\\x41', True\n x25519_bikel1 = b'\\x2F\\xAE', True\n bikel3 = b'\\x02\\x42', True\n p384_bikel3 = b'\\x2F\\x42', True\n x448_bikel3 = b'\\x2F\\xAF', True\n bikel5 = b'\\x02\\x43', True\n p521_bikel5 = b'\\x2F\\x43', True\n hqc128 = b'\\x02\\x2C', True\n p256_hqc128 = b'\\x2F\\x2C', True\n x25519_hqc128 = b'\\x2F\\xAC', True\n hqc192 = b'\\x02\\x2D', True\n p384_hqc192 = b'\\x2F\\x2D', True\n x448_hqc192 = b'\\x2F\\xAD', True\n hqc256 = b'\\x02\\x2E', True\n p521_hqc256 = b'\\x2F\\x2E', True\n dilithium2 = b'\\xfe\\xa0', True\n p256_dilithium2 = b'\\xfe\\xa1', True\n rsa3072_dilithium2 = b'\\xfe\\xa2', True\n dilithium3 = b'\\xfe\\xa3', True\n p384_dilithium3 = b'\\xfe\\xa4', True\n dilithium5 = b'\\xfe\\xa5', True\n p521_dilithium5 = b'\\xfe\\xa6', True\n falcon512 = b'\\xfe\\xae', True\n p256_falcon512 = b'\\xfe\\xaf', True\n rsa3072_falcon512 = b'\\xfe\\xb0', True\n falcon1024 = b'\\xfe\\xb1', True\n p521_falcon1024 = b'\\xfe\\xb2', True\n sphincssha2128fsimple = b'\\xfe\\xb3', True\n p256_sphincssha2128fsimple = b'\\xfe\\xb4', True\n rsa3072_sphincssha2128fsimple = b'\\xfe\\xb5', True\n sphincssha2128ssimple = b'\\xfe\\xb6', True\n p256_sphincssha2128ssimple = b'\\xfe\\xb7', True\n rsa3072_sphincssha2128ssimple = b'\\xfe\\xb8', True\n sphincssha2192fsimple = b'\\xfe\\xb9', True\n p384_sphincssha2192fsimple = b'\\xfe\\xba', True\n sphincssha2192ssimple = b'\\xfe\\xbb', True\n p384_sphincssha2192ssimple = b'\\xfe\\xbc', True\n sphincssha2256fsimple = b'\\xfe\\xbd', True\n p521_sphincssha2256fsimple = b'\\xfe\\xbe', True\n sphincssha2256ssimple = b'\\xfe\\xc0', True\n p521_sphincssha2256ssimple = b'\\xfe\\xc1', True\n sphincsshake128fsimple = b'\\xfe\\xc2', True\n p256_sphincsshake128fsimple = b'\\xfe\\xc3', True\n rsa3072_sphincsshake128fsimple = b'\\xfe\\xc4', True\n sphincsshake128ssimple = b'\\xfe\\xc5', True\n p256_sphincsshake128ssimple = b'\\xfe\\xc6', True\n rsa3072_sphincsshake128ssimple = b'\\xfe\\xc7', True\n sphincsshake192fsimple = b'\\xfe\\xc8', True\n p384_sphincsshake192fsimple = b'\\xfe\\xc9', True\n sphincsshake192ssimple = b'\\xfe\\xca', True\n p384_sphincsshake192ssimple = b'\\xfe\\xcb', True\n sphincsshake256fsimple = b'\\xfe\\xcc', True\n p521_sphincsshake256fsimple = b'\\xfe\\xcd', True\n sphincsshake256ssimple = b'\\xfe\\xce', True\n p521_sphincsshake256ssimple = b'\\xfe\\xcf', True" }, { "identifier": "Protocol", "path": "src/hello_tls/names_and_numbers.py", "snippet": "class Protocol(Enum):\n # Keep protocols in order of preference.\n TLS1_3 = b\"\\x03\\x04\"\n TLS1_2 = b\"\\x03\\x03\"\n TLS1_1 = b\"\\x03\\x02\"\n TLS1_0 = b\"\\x03\\x01\"\n SSLv3 = b\"\\x03\\x00\"\n\n def __repr__(self):\n return self.name\n def __lt__(self, other):\n if self.__class__ != other.__class__:\n return NotImplemented\n return self.value < other.value" }, { "identifier": "CompressionMethod", "path": "src/hello_tls/names_and_numbers.py", "snippet": "class CompressionMethod(Enum):\n NULL = b'\\x00'\n DEFLATE = b'\\x01'" } ]
from enum import Enum from multiprocessing.pool import ThreadPool from typing import Iterable, Union, List, Optional, Iterator, Callable, Any from urllib.parse import urlparse from datetime import datetime, timezone from .protocol import ClientHello, ScanError, make_client_hello, parse_server_hello, ServerAlertError, BadServerResponse, ServerHello, logger from .names_and_numbers import AlertDescription, CipherSuite, Group, Protocol, CompressionMethod from OpenSSL import SSL, crypto import socket import re import dataclasses import ssl, select
14302
# Default number of workers/threads/concurrent connections to use. DEFAULT_MAX_WORKERS: int = 6 # Default socket connection timeout, in seconds. DEFAULT_TIMEOUT: float = 2 class DowngradeError(ScanError): """ Error for servers that attempt to downgrade beyond supported versions. """ pass class ConnectionError(ScanError): """ Class for error in resolving or connecting to a server. """ pass class ProxyError(ConnectionError): """ Class for errors in connecting through a proxy. """ pass @dataclasses.dataclass class ConnectionSettings: """ Settings for a connection to a server, including the host, port, and proxy. """ host: str port: int = 443 proxy: Optional[str] = None timeout_in_seconds: Optional[float] = DEFAULT_TIMEOUT date: datetime = dataclasses.field(default_factory=lambda: datetime.now(tz=timezone.utc).replace(microsecond=0)) def make_socket(settings: ConnectionSettings) -> socket.socket: """ Creates and connects a socket to the target server, through the chosen proxy if any. """ socket_host, socket_port = None, None # To appease the type checker. try: if not settings.proxy: socket_host, socket_port = settings.host, settings.port return socket.create_connection((socket_host, socket_port), timeout=settings.timeout_in_seconds) if not settings.proxy.startswith('http://'): raise ProxyError("Only HTTP proxies are supported at the moment.", settings.proxy) socket_host, socket_port = parse_target(settings.proxy, 80) sock = socket.create_connection((socket_host, socket_port), timeout=settings.timeout_in_seconds) sock.send(f"CONNECT {settings.host}:{settings.port} HTTP/1.1\r\nhost:{socket_host}\r\n\r\n".encode('utf-8')) sock_file = sock.makefile('r', newline='\r\n') line = sock_file.readline() if not re.fullmatch(r'HTTP/1\.[01] 200 Connection [Ee]stablished\r\n', line): sock_file.close() sock.close() raise ProxyError("Proxy refused the connection: ", line) while True: if sock_file.readline() == '\r\n': break return sock except TimeoutError as e: raise ConnectionError(f"Connection to {socket_host}:{socket_port} timed out after {settings.timeout_in_seconds} seconds") from e except socket.gaierror as e: raise ConnectionError(f"Could not resolve host {socket_host}") from e except socket.error as e: raise ConnectionError(f"Could not connect to {socket_host}:{socket_port}") from e def send_hello(connection_settings: ConnectionSettings, client_hello: ClientHello) -> ServerHello: """ Sends a Client Hello to the server, and returns the parsed ServerHello. Raises exceptions for the different alert messages the server can send. """ sock = make_socket(connection_settings) sock.send(make_client_hello(client_hello)) packet_stream = iter(lambda: sock.recv(4096), b'') server_hello = parse_server_hello(packet_stream) if server_hello.version not in client_hello.protocols: # Server picked a protocol we didn't ask for.
# Default number of workers/threads/concurrent connections to use. DEFAULT_MAX_WORKERS: int = 6 # Default socket connection timeout, in seconds. DEFAULT_TIMEOUT: float = 2 class DowngradeError(ScanError): """ Error for servers that attempt to downgrade beyond supported versions. """ pass class ConnectionError(ScanError): """ Class for error in resolving or connecting to a server. """ pass class ProxyError(ConnectionError): """ Class for errors in connecting through a proxy. """ pass @dataclasses.dataclass class ConnectionSettings: """ Settings for a connection to a server, including the host, port, and proxy. """ host: str port: int = 443 proxy: Optional[str] = None timeout_in_seconds: Optional[float] = DEFAULT_TIMEOUT date: datetime = dataclasses.field(default_factory=lambda: datetime.now(tz=timezone.utc).replace(microsecond=0)) def make_socket(settings: ConnectionSettings) -> socket.socket: """ Creates and connects a socket to the target server, through the chosen proxy if any. """ socket_host, socket_port = None, None # To appease the type checker. try: if not settings.proxy: socket_host, socket_port = settings.host, settings.port return socket.create_connection((socket_host, socket_port), timeout=settings.timeout_in_seconds) if not settings.proxy.startswith('http://'): raise ProxyError("Only HTTP proxies are supported at the moment.", settings.proxy) socket_host, socket_port = parse_target(settings.proxy, 80) sock = socket.create_connection((socket_host, socket_port), timeout=settings.timeout_in_seconds) sock.send(f"CONNECT {settings.host}:{settings.port} HTTP/1.1\r\nhost:{socket_host}\r\n\r\n".encode('utf-8')) sock_file = sock.makefile('r', newline='\r\n') line = sock_file.readline() if not re.fullmatch(r'HTTP/1\.[01] 200 Connection [Ee]stablished\r\n', line): sock_file.close() sock.close() raise ProxyError("Proxy refused the connection: ", line) while True: if sock_file.readline() == '\r\n': break return sock except TimeoutError as e: raise ConnectionError(f"Connection to {socket_host}:{socket_port} timed out after {settings.timeout_in_seconds} seconds") from e except socket.gaierror as e: raise ConnectionError(f"Could not resolve host {socket_host}") from e except socket.error as e: raise ConnectionError(f"Could not connect to {socket_host}:{socket_port}") from e def send_hello(connection_settings: ConnectionSettings, client_hello: ClientHello) -> ServerHello: """ Sends a Client Hello to the server, and returns the parsed ServerHello. Raises exceptions for the different alert messages the server can send. """ sock = make_socket(connection_settings) sock.send(make_client_hello(client_hello)) packet_stream = iter(lambda: sock.recv(4096), b'') server_hello = parse_server_hello(packet_stream) if server_hello.version not in client_hello.protocols: # Server picked a protocol we didn't ask for.
logger.info(f"Server attempted to downgrade protocol to unsupported version {server_hello.version}")
0
2023-10-21 02:00:13+00:00
16k
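Note on the record above: its cropped_code stops right before the downgrade check that the gold next_line completes — the protocol version the server picks must be one the client actually offered. Below is a minimal, self-contained sketch of that check, assuming only the two-byte version codes shown in the record's Protocol snippet; it is an illustration, not the hello_tls package API.

```python
# Minimal sketch of the downgrade check the gold next_line belongs to
# (illustration only, not the hello_tls API): the version the server picks
# must be one of the versions the client offered.
from enum import Enum


class Protocol(Enum):
    # Two-byte TLS/SSL version codes, as listed in the record's Protocol snippet.
    TLS1_3 = b"\x03\x04"
    TLS1_2 = b"\x03\x03"
    TLS1_1 = b"\x03\x02"
    TLS1_0 = b"\x03\x01"
    SSLv3 = b"\x03\x00"


class DowngradeError(Exception):
    """Server picked a protocol version the client never offered."""


def check_negotiated_version(offered, chosen):
    if chosen not in offered:
        raise DowngradeError(
            f"Server attempted to downgrade protocol to unsupported version {chosen.name}"
        )


# Offering TLS 1.3 and 1.2 and receiving TLS 1.2 back is accepted;
# receiving TLS 1.0 when only TLS 1.3 was offered is flagged.
check_negotiated_version([Protocol.TLS1_3, Protocol.TLS1_2], Protocol.TLS1_2)
try:
    check_negotiated_version([Protocol.TLS1_3], Protocol.TLS1_0)
except DowngradeError as e:
    print(e)
```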
YefanZhou/TempBalance
object_detection/src/YOLOv8/ultralytics/vit/sam/modules/mask_generator.py
[ { "identifier": "MaskData", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/amg.py", "snippet": "class MaskData:\n \"\"\"\n A structure for storing masks and their related data in batched format.\n Implements basic filtering and concatenation.\n \"\"\"\n\n def __init__(self, **kwargs) -> None:\n \"\"\"Initialize a MaskData object, ensuring all values are supported types.\"\"\"\n for v in kwargs.values():\n assert isinstance(\n v, (list, np.ndarray, torch.Tensor)), 'MaskData only supports list, numpy arrays, and torch tensors.'\n self._stats = dict(**kwargs)\n\n def __setitem__(self, key: str, item: Any) -> None:\n \"\"\"Set an item in the MaskData object, ensuring it is a supported type.\"\"\"\n assert isinstance(\n item, (list, np.ndarray, torch.Tensor)), 'MaskData only supports list, numpy arrays, and torch tensors.'\n self._stats[key] = item\n\n def __delitem__(self, key: str) -> None:\n \"\"\"Delete an item from the MaskData object.\"\"\"\n del self._stats[key]\n\n def __getitem__(self, key: str) -> Any:\n \"\"\"Get an item from the MaskData object.\"\"\"\n return self._stats[key]\n\n def items(self) -> ItemsView[str, Any]:\n \"\"\"Return an ItemsView of the MaskData object.\"\"\"\n return self._stats.items()\n\n def filter(self, keep: torch.Tensor) -> None:\n \"\"\"Filter the MaskData object based on the given boolean tensor.\"\"\"\n for k, v in self._stats.items():\n if v is None:\n self._stats[k] = None\n elif isinstance(v, torch.Tensor):\n self._stats[k] = v[torch.as_tensor(keep, device=v.device)]\n elif isinstance(v, np.ndarray):\n self._stats[k] = v[keep.detach().cpu().numpy()]\n elif isinstance(v, list) and keep.dtype == torch.bool:\n self._stats[k] = [a for i, a in enumerate(v) if keep[i]]\n elif isinstance(v, list):\n self._stats[k] = [v[i] for i in keep]\n else:\n raise TypeError(f'MaskData key {k} has an unsupported type {type(v)}.')\n\n def cat(self, new_stats: 'MaskData') -> None:\n \"\"\"Concatenate a new MaskData object to the current one.\"\"\"\n for k, v in new_stats.items():\n if k not in self._stats or self._stats[k] is None:\n self._stats[k] = deepcopy(v)\n elif isinstance(v, torch.Tensor):\n self._stats[k] = torch.cat([self._stats[k], v], dim=0)\n elif isinstance(v, np.ndarray):\n self._stats[k] = np.concatenate([self._stats[k], v], axis=0)\n elif isinstance(v, list):\n self._stats[k] = self._stats[k] + deepcopy(v)\n else:\n raise TypeError(f'MaskData key {k} has an unsupported type {type(v)}.')\n\n def to_numpy(self) -> None:\n \"\"\"Convert all torch tensors in the MaskData object to numpy arrays.\"\"\"\n for k, v in self._stats.items():\n if isinstance(v, torch.Tensor):\n self._stats[k] = v.detach().cpu().numpy()" }, { "identifier": "area_from_rle", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/amg.py", "snippet": "def area_from_rle(rle: Dict[str, Any]) -> int:\n \"\"\"Calculate the area of a mask from its uncompressed RLE.\"\"\"\n return sum(rle['counts'][1::2])" }, { "identifier": "batch_iterator", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/amg.py", "snippet": "def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:\n \"\"\"Yield batches of data from the input arguments.\"\"\"\n assert args and all(len(a) == len(args[0]) for a in args), 'Batched iteration must have same-size inputs.'\n n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)\n for b in range(n_batches):\n yield [arg[b * batch_size:(b + 1) * batch_size] for arg in args]" }, { "identifier": "batched_mask_to_box", 
"path": "object_detection/src/YOLOv8/ultralytics/vit/sam/amg.py", "snippet": "def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Calculates boxes in XYXY format around masks. Return [0,0,0,0] for\n an empty mask. For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4.\n \"\"\"\n # torch.max below raises an error on empty inputs, just skip in this case\n if torch.numel(masks) == 0:\n return torch.zeros(*masks.shape[:-2], 4, device=masks.device)\n\n # Normalize shape to CxHxW\n shape = masks.shape\n h, w = shape[-2:]\n masks = masks.flatten(0, -3) if len(shape) > 2 else masks.unsqueeze(0)\n # Get top and bottom edges\n in_height, _ = torch.max(masks, dim=-1)\n in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :]\n bottom_edges, _ = torch.max(in_height_coords, dim=-1)\n in_height_coords = in_height_coords + h * (~in_height)\n top_edges, _ = torch.min(in_height_coords, dim=-1)\n\n # Get left and right edges\n in_width, _ = torch.max(masks, dim=-2)\n in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :]\n right_edges, _ = torch.max(in_width_coords, dim=-1)\n in_width_coords = in_width_coords + w * (~in_width)\n left_edges, _ = torch.min(in_width_coords, dim=-1)\n\n # If the mask is empty the right edge will be to the left of the left edge.\n # Replace these boxes with [0, 0, 0, 0]\n empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)\n out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)\n out = out * (~empty_filter).unsqueeze(-1)\n\n # Return to original shape\n return out.reshape(*shape[:-2], 4) if len(shape) > 2 else out[0]" }, { "identifier": "box_xyxy_to_xywh", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/amg.py", "snippet": "def box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor:\n \"\"\"Convert bounding boxes from XYXY format to XYWH format.\"\"\"\n box_xywh = deepcopy(box_xyxy)\n box_xywh[2] = box_xywh[2] - box_xywh[0]\n box_xywh[3] = box_xywh[3] - box_xywh[1]\n return box_xywh" }, { "identifier": "build_all_layer_point_grids", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/amg.py", "snippet": "def build_all_layer_point_grids(n_per_side: int, n_layers: int, scale_per_layer: int) -> List[np.ndarray]:\n \"\"\"Generate point grids for all crop layers.\"\"\"\n return [build_point_grid(int(n_per_side / (scale_per_layer ** i))) for i in range(n_layers + 1)]" }, { "identifier": "calculate_stability_score", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/amg.py", "snippet": "def calculate_stability_score(masks: torch.Tensor, mask_threshold: float, threshold_offset: float) -> torch.Tensor:\n \"\"\"\n Computes the stability score for a batch of masks. 
The stability\n score is the IoU between the binary masks obtained by thresholding\n the predicted mask logits at high and low values.\n \"\"\"\n # One mask is always contained inside the other.\n # Save memory by preventing unnecessary cast to torch.int64\n intersections = ((masks > (mask_threshold + threshold_offset)).sum(-1, dtype=torch.int16).sum(-1,\n dtype=torch.int32))\n unions = ((masks > (mask_threshold - threshold_offset)).sum(-1, dtype=torch.int16).sum(-1, dtype=torch.int32))\n return intersections / unions" }, { "identifier": "coco_encode_rle", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/amg.py", "snippet": "def coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"Encode uncompressed RLE (run-length encoding) to COCO RLE format.\"\"\"\n from pycocotools import mask as mask_utils # type: ignore\n\n h, w = uncompressed_rle['size']\n rle = mask_utils.frPyObjects(uncompressed_rle, h, w)\n rle['counts'] = rle['counts'].decode('utf-8') # Necessary to serialize with json\n return rle" }, { "identifier": "generate_crop_boxes", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/amg.py", "snippet": "def generate_crop_boxes(im_size: Tuple[int, ...], n_layers: int,\n overlap_ratio: float) -> Tuple[List[List[int]], List[int]]:\n \"\"\"Generates a list of crop boxes of different sizes. Each layer has (2**i)**2 boxes for the ith layer.\"\"\"\n crop_boxes, layer_idxs = [], []\n im_h, im_w = im_size\n short_side = min(im_h, im_w)\n\n # Original image\n crop_boxes.append([0, 0, im_w, im_h])\n layer_idxs.append(0)\n\n def crop_len(orig_len, n_crops, overlap):\n \"\"\"Crops bounding boxes to the size of the input image.\"\"\"\n return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops))\n\n for i_layer in range(n_layers):\n n_crops_per_side = 2 ** (i_layer + 1)\n overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))\n\n crop_w = crop_len(im_w, n_crops_per_side, overlap)\n crop_h = crop_len(im_h, n_crops_per_side, overlap)\n\n crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)]\n crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)]\n\n # Crops in XYWH format\n for x0, y0 in product(crop_box_x0, crop_box_y0):\n box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)]\n crop_boxes.append(box)\n layer_idxs.append(i_layer + 1)\n\n return crop_boxes, layer_idxs" }, { "identifier": "is_box_near_crop_edge", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/amg.py", "snippet": "def is_box_near_crop_edge(boxes: torch.Tensor,\n crop_box: List[int],\n orig_box: List[int],\n atol: float = 20.0) -> torch.Tensor:\n \"\"\"Return a boolean tensor indicating if boxes are near the crop edge.\"\"\"\n crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)\n orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)\n boxes = uncrop_boxes_xyxy(boxes, crop_box).float()\n near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0)\n near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0)\n near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge)\n return torch.any(near_crop_edge, dim=1)" }, { "identifier": "mask_to_rle_pytorch", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/amg.py", "snippet": "def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]:\n \"\"\"Encode masks as uncompressed RLEs in the format expected by pycocotools.\"\"\"\n # Put in fortran order 
and flatten h,w\n b, h, w = tensor.shape\n tensor = tensor.permute(0, 2, 1).flatten(1)\n\n # Compute change indices\n diff = tensor[:, 1:] ^ tensor[:, :-1]\n change_indices = diff.nonzero()\n\n # Encode run length\n out = []\n for i in range(b):\n cur_idxs = change_indices[change_indices[:, 0] == i, 1]\n cur_idxs = torch.cat([\n torch.tensor([0], dtype=cur_idxs.dtype, device=cur_idxs.device),\n cur_idxs + 1,\n torch.tensor([h * w], dtype=cur_idxs.dtype, device=cur_idxs.device), ])\n btw_idxs = cur_idxs[1:] - cur_idxs[:-1]\n counts = [] if tensor[i, 0] == 0 else [0]\n counts.extend(btw_idxs.detach().cpu().tolist())\n out.append({'size': [h, w], 'counts': counts})\n return out" }, { "identifier": "remove_small_regions", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/amg.py", "snippet": "def remove_small_regions(mask: np.ndarray, area_thresh: float, mode: str) -> Tuple[np.ndarray, bool]:\n \"\"\"Remove small disconnected regions or holes in a mask, returning the mask and a modification indicator.\"\"\"\n import cv2 # type: ignore\n\n assert mode in {'holes', 'islands'}\n correct_holes = mode == 'holes'\n working_mask = (correct_holes ^ mask).astype(np.uint8)\n n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8)\n sizes = stats[:, -1][1:] # Row 0 is background label\n small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh]\n if not small_regions:\n return mask, False\n fill_labels = [0] + small_regions\n if not correct_holes:\n fill_labels = [i for i in range(n_labels) if i not in fill_labels]\n # If every region is below threshold, keep largest\n if not fill_labels:\n fill_labels = [int(np.argmax(sizes)) + 1]\n mask = np.isin(regions, fill_labels)\n return mask, True" }, { "identifier": "rle_to_mask", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/amg.py", "snippet": "def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:\n \"\"\"Compute a binary mask from an uncompressed RLE.\"\"\"\n h, w = rle['size']\n mask = np.empty(h * w, dtype=bool)\n idx = 0\n parity = False\n for count in rle['counts']:\n mask[idx:idx + count] = parity\n idx += count\n parity ^= True\n mask = mask.reshape(w, h)\n return mask.transpose() # Put in C order" }, { "identifier": "uncrop_boxes_xyxy", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/amg.py", "snippet": "def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\n \"\"\"Uncrop bounding boxes by adding the crop box offset.\"\"\"\n x0, y0, _, _ = crop_box\n offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device)\n # Check if boxes has a channel dimension\n if len(boxes.shape) == 3:\n offset = offset.unsqueeze(1)\n return boxes + offset" }, { "identifier": "uncrop_masks", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/amg.py", "snippet": "def uncrop_masks(masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int) -> torch.Tensor:\n \"\"\"Uncrop masks by padding them to the original image size.\"\"\"\n x0, y0, x1, y1 = crop_box\n if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h:\n return masks\n # Coordinate transform masks\n pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0)\n pad = (x0, pad_x - x0, y0, pad_y - y0)\n return torch.nn.functional.pad(masks, pad, value=0)" }, { "identifier": "uncrop_points", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/amg.py", "snippet": "def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\n \"\"\"Uncrop points by adding the crop box offset.\"\"\"\n x0, y0, _, _ = 
crop_box\n offset = torch.tensor([[x0, y0]], device=points.device)\n # Check if points has a channel dimension\n if len(points.shape) == 3:\n offset = offset.unsqueeze(1)\n return points + offset" }, { "identifier": "PromptPredictor", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/modules/prompt_predictor.py", "snippet": "class PromptPredictor:\n\n def __init__(self, sam_model: Sam) -> None:\n \"\"\"\n Uses SAM to calculate the image embedding for an image, and then\n allow repeated, efficient mask prediction given prompts.\n\n Arguments:\n sam_model (Sam): The model to use for mask prediction.\n \"\"\"\n super().__init__()\n self.model = sam_model\n self.transform = ResizeLongestSide(sam_model.image_encoder.img_size)\n self.reset_image()\n\n def set_image(self, image: np.ndarray, image_format: str = 'RGB') -> None:\n \"\"\"\n Calculates the image embeddings for the provided image, allowing\n masks to be predicted with the 'predict' method.\n\n Arguments:\n image (np.ndarray): The image for calculating masks. Expects an\n image in HWC uint8 format, with pixel values in [0, 255].\n image_format (str): The color format of the image, in ['RGB', 'BGR'].\n \"\"\"\n assert image_format in {'RGB', 'BGR'}, f\"image_format must be in ['RGB', 'BGR'], is {image_format}.\"\n if image_format != self.model.image_format:\n image = image[..., ::-1]\n\n # Transform the image to the form expected by the model\n input_image = self.transform.apply_image(image)\n input_image_torch = torch.as_tensor(input_image, device=self.device)\n input_image_torch = input_image_torch.permute(2, 0, 1).contiguous()[None, :, :, :]\n\n self.set_torch_image(input_image_torch, image.shape[:2])\n\n @torch.no_grad()\n def set_torch_image(self, transformed_image: torch.Tensor, original_image_size: Tuple[int, ...]) -> None:\n \"\"\"\n Calculates the image embeddings for the provided image, allowing\n masks to be predicted with the 'predict' method. Expects the input\n image to be already transformed to the format expected by the model.\n\n Arguments:\n transformed_image (torch.Tensor): The input image, with shape\n 1x3xHxW, which has been transformed with ResizeLongestSide.\n original_image_size (tuple(int, int)): The size of the image\n before transformation, in (H, W) format.\n \"\"\"\n if len(transformed_image.shape) != 4 \\\n or transformed_image.shape[1] != 3 \\\n or max(*transformed_image.shape[2:]) != self.model.image_encoder.img_size:\n raise ValueError('set_torch_image input must be BCHW with long side {self.model.image_encoder.img_size}.')\n self.reset_image()\n\n self.original_size = original_image_size\n self.input_size = tuple(transformed_image.shape[-2:])\n input_image = self.model.preprocess(transformed_image)\n self.features = self.model.image_encoder(input_image)\n self.is_image_set = True\n\n def predict(\n self,\n point_coords: Optional[np.ndarray] = None,\n point_labels: Optional[np.ndarray] = None,\n box: Optional[np.ndarray] = None,\n mask_input: Optional[np.ndarray] = None,\n multimask_output: bool = True,\n return_logits: bool = False,\n ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"\n Predict masks for the given input prompts, using the currently set image.\n\n Arguments:\n point_coords (np.ndarray, None): A Nx2 array of point prompts to the\n model. Each point is in (X,Y) in pixels.\n point_labels (np.ndarray, None): A length N array of labels for the\n point prompts. 
1 indicates a foreground point and 0 indicates a\n background point.\n box (np.ndarray, None): A length 4 array given a box prompt to the\n model, in XYXY format.\n mask_input (np.ndarray): A low resolution mask input to the model, typically\n coming from a previous prediction iteration. Has form 1xHxW, where\n for SAM, H=W=256.\n multimask_output (bool): If true, the model will return three masks.\n For ambiguous input prompts (such as a single click), this will often\n produce better masks than a single prediction. If only a single\n mask is needed, the model's predicted quality score can be used\n to select the best mask. For non-ambiguous prompts, such as multiple\n input prompts, multimask_output=False can give better results.\n return_logits (bool): If true, returns un-thresholded masks logits\n instead of a binary mask.\n\n Returns:\n (np.ndarray): The output masks in CxHxW format, where C is the\n number of masks, and (H, W) is the original image size.\n (np.ndarray): An array of length C containing the model's\n predictions for the quality of each mask.\n (np.ndarray): An array of shape CxHxW, where C is the number\n of masks and H=W=256. These low resolution logits can be passed to\n a subsequent iteration as mask input.\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError('An image must be set with .set_image(...) before mask prediction.')\n\n # Transform input prompts\n coords_torch, labels_torch, box_torch, mask_input_torch = None, None, None, None\n if point_coords is not None:\n assert (point_labels is not None), 'point_labels must be supplied if point_coords is supplied.'\n point_coords = self.transform.apply_coords(point_coords, self.original_size)\n coords_torch = torch.as_tensor(point_coords, dtype=torch.float, device=self.device)\n labels_torch = torch.as_tensor(point_labels, dtype=torch.int, device=self.device)\n coords_torch, labels_torch = coords_torch[None, :, :], labels_torch[None, :]\n if box is not None:\n box = self.transform.apply_boxes(box, self.original_size)\n box_torch = torch.as_tensor(box, dtype=torch.float, device=self.device)\n box_torch = box_torch[None, :]\n if mask_input is not None:\n mask_input_torch = torch.as_tensor(mask_input, dtype=torch.float, device=self.device)\n mask_input_torch = mask_input_torch[None, :, :, :]\n\n masks, iou_predictions, low_res_masks = self.predict_torch(\n coords_torch,\n labels_torch,\n box_torch,\n mask_input_torch,\n multimask_output,\n return_logits=return_logits,\n )\n\n masks_np = masks[0].detach().cpu().numpy()\n iou_predictions_np = iou_predictions[0].detach().cpu().numpy()\n low_res_masks_np = low_res_masks[0].detach().cpu().numpy()\n return masks_np, iou_predictions_np, low_res_masks_np\n\n @torch.no_grad()\n def predict_torch(\n self,\n point_coords: Optional[torch.Tensor],\n point_labels: Optional[torch.Tensor],\n boxes: Optional[torch.Tensor] = None,\n mask_input: Optional[torch.Tensor] = None,\n multimask_output: bool = True,\n return_logits: bool = False,\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"\n Predict masks for the given input prompts, using the currently set image.\n Input prompts are batched torch tensors and are expected to already be\n transformed to the input frame using ResizeLongestSide.\n\n Arguments:\n point_coords (torch.Tensor, None): A BxNx2 array of point prompts to the\n model. Each point is in (X,Y) in pixels.\n point_labels (torch.Tensor, None): A BxN array of labels for the\n point prompts. 
1 indicates a foreground point and 0 indicates a\n background point.\n boxes (np.ndarray, None): A Bx4 array given a box prompt to the\n model, in XYXY format.\n mask_input (np.ndarray): A low resolution mask input to the model, typically\n coming from a previous prediction iteration. Has form Bx1xHxW, where\n for SAM, H=W=256. Masks returned by a previous iteration of the\n predict method do not need further transformation.\n multimask_output (bool): If true, the model will return three masks.\n For ambiguous input prompts (such as a single click), this will often\n produce better masks than a single prediction. If only a single\n mask is needed, the model's predicted quality score can be used\n to select the best mask. For non-ambiguous prompts, such as multiple\n input prompts, multimask_output=False can give better results.\n return_logits (bool): If true, returns un-thresholded masks logits\n instead of a binary mask.\n\n Returns:\n (torch.Tensor): The output masks in BxCxHxW format, where C is the\n number of masks, and (H, W) is the original image size.\n (torch.Tensor): An array of shape BxC containing the model's\n predictions for the quality of each mask.\n (torch.Tensor): An array of shape BxCxHxW, where C is the number\n of masks and H=W=256. These low res logits can be passed to\n a subsequent iteration as mask input.\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError('An image must be set with .set_image(...) before mask prediction.')\n\n points = (point_coords, point_labels) if point_coords is not None else None\n # Embed prompts\n sparse_embeddings, dense_embeddings = self.model.prompt_encoder(\n points=points,\n boxes=boxes,\n masks=mask_input,\n )\n\n # Predict masks\n low_res_masks, iou_predictions = self.model.mask_decoder(\n image_embeddings=self.features,\n image_pe=self.model.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output,\n )\n\n # Upscale the masks to the original image resolution\n masks = self.model.postprocess_masks(low_res_masks, self.input_size, self.original_size)\n\n if not return_logits:\n masks = masks > self.model.mask_threshold\n\n return masks, iou_predictions, low_res_masks\n\n def get_image_embedding(self) -> torch.Tensor:\n \"\"\"\n Returns the image embeddings for the currently set image, with\n shape 1xCxHxW, where C is the embedding dimension and (H,W) are\n the embedding spatial dimension of SAM (typically C=256, H=W=64).\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError('An image must be set with .set_image(...) 
to generate an embedding.')\n assert self.features is not None, 'Features must exist if an image has been set.'\n return self.features\n\n @property\n def device(self) -> torch.device:\n return self.model.device\n\n def reset_image(self) -> None:\n \"\"\"Resets the currently set image.\"\"\"\n self.is_image_set = False\n self.features = None\n self.orig_h = None\n self.orig_w = None\n self.input_h = None\n self.input_w = None" }, { "identifier": "Sam", "path": "object_detection/src/YOLOv8/ultralytics/vit/sam/modules/sam.py", "snippet": "class Sam(nn.Module):\n mask_threshold: float = 0.0\n image_format: str = 'RGB'\n\n def __init__(\n self,\n image_encoder: ImageEncoderViT,\n prompt_encoder: PromptEncoder,\n mask_decoder: MaskDecoder,\n pixel_mean: List[float] = [123.675, 116.28, 103.53],\n pixel_std: List[float] = [58.395, 57.12, 57.375],\n ) -> None:\n \"\"\"\n SAM predicts object masks from an image and input prompts.\n\n Arguments:\n image_encoder (ImageEncoderViT): The backbone used to encode the\n image into image embeddings that allow for efficient mask prediction.\n prompt_encoder (PromptEncoder): Encodes various types of input prompts.\n mask_decoder (MaskDecoder): Predicts masks from the image embeddings\n and encoded prompts.\n pixel_mean (list(float)): Mean values for normalizing pixels in the input image.\n pixel_std (list(float)): Std values for normalizing pixels in the input image.\n \"\"\"\n super().__init__()\n self.image_encoder = image_encoder\n self.prompt_encoder = prompt_encoder\n self.mask_decoder = mask_decoder\n self.register_buffer('pixel_mean', torch.Tensor(pixel_mean).view(-1, 1, 1), False)\n self.register_buffer('pixel_std', torch.Tensor(pixel_std).view(-1, 1, 1), False)\n\n @property\n def device(self) -> Any:\n return self.pixel_mean.device\n\n @torch.no_grad()\n def forward(\n self,\n batched_input: List[Dict[str, Any]],\n multimask_output: bool,\n ) -> List[Dict[str, torch.Tensor]]:\n \"\"\"\n Predicts masks end-to-end from provided images and prompts.\n If prompts are not known in advance, using SamPredictor is\n recommended over calling the model directly.\n\n Arguments:\n batched_input (list(dict)): A list over input images, each a\n dictionary with the following keys. A prompt key can be\n excluded if it is not present.\n 'image': The image as a torch tensor in 3xHxW format,\n already transformed for input to the model.\n 'original_size': (tuple(int, int)) The original size of\n the image before transformation, as (H, W).\n 'point_coords': (torch.Tensor) Batched point prompts for\n this image, with shape BxNx2. 
Already transformed to the\n input frame of the model.\n 'point_labels': (torch.Tensor) Batched labels for point prompts,\n with shape BxN.\n 'boxes': (torch.Tensor) Batched box inputs, with shape Bx4.\n Already transformed to the input frame of the model.\n 'mask_inputs': (torch.Tensor) Batched mask inputs to the model,\n in the form Bx1xHxW.\n multimask_output (bool): Whether the model should predict multiple\n disambiguating masks, or return a single mask.\n\n Returns:\n (list(dict)): A list over input images, where each element is\n as dictionary with the following keys.\n 'masks': (torch.Tensor) Batched binary mask predictions,\n with shape BxCxHxW, where B is the number of input prompts,\n C is determined by multimask_output, and (H, W) is the\n original size of the image.\n 'iou_predictions': (torch.Tensor) The model's predictions\n of mask quality, in shape BxC.\n 'low_res_logits': (torch.Tensor) Low resolution logits with\n shape BxCxHxW, where H=W=256. Can be passed as mask input\n to subsequent iterations of prediction.\n \"\"\"\n input_images = torch.stack([self.preprocess(x['image']) for x in batched_input], dim=0)\n image_embeddings = self.image_encoder(input_images)\n\n outputs = []\n for image_record, curr_embedding in zip(batched_input, image_embeddings):\n if 'point_coords' in image_record:\n points = (image_record['point_coords'], image_record['point_labels'])\n else:\n points = None\n sparse_embeddings, dense_embeddings = self.prompt_encoder(\n points=points,\n boxes=image_record.get('boxes', None),\n masks=image_record.get('mask_inputs', None),\n )\n low_res_masks, iou_predictions = self.mask_decoder(\n image_embeddings=curr_embedding.unsqueeze(0),\n image_pe=self.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output,\n )\n masks = self.postprocess_masks(\n low_res_masks,\n input_size=image_record['image'].shape[-2:],\n original_size=image_record['original_size'],\n )\n masks = masks > self.mask_threshold\n outputs.append({\n 'masks': masks,\n 'iou_predictions': iou_predictions,\n 'low_res_logits': low_res_masks, })\n return outputs\n\n def postprocess_masks(\n self,\n masks: torch.Tensor,\n input_size: Tuple[int, ...],\n original_size: Tuple[int, ...],\n ) -> torch.Tensor:\n \"\"\"\n Remove padding and upscale masks to the original image size.\n\n Arguments:\n masks (torch.Tensor): Batched masks from the mask_decoder,\n in BxCxHxW format.\n input_size (tuple(int, int)): The size of the image input to the\n model, in (H, W) format. Used to remove padding.\n original_size (tuple(int, int)): The original size of the image\n before resizing for input to the model, in (H, W) format.\n\n Returns:\n (torch.Tensor): Batched masks in BxCxHxW format, where (H, W)\n is given by original_size.\n \"\"\"\n masks = F.interpolate(\n masks,\n (self.image_encoder.img_size, self.image_encoder.img_size),\n mode='bilinear',\n align_corners=False,\n )\n masks = masks[..., :input_size[0], :input_size[1]]\n masks = F.interpolate(masks, original_size, mode='bilinear', align_corners=False)\n return masks\n\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n\n # Pad\n h, w = x.shape[-2:]\n padh = self.image_encoder.img_size - h\n padw = self.image_encoder.img_size - w\n return F.pad(x, (0, padw, 0, padh))" } ]
from typing import Any, Dict, List, Optional, Tuple from torchvision.ops.boxes import batched_nms, box_area # type: ignore from ..amg import (MaskData, area_from_rle, batch_iterator, batched_mask_to_box, box_xyxy_to_xywh, build_all_layer_point_grids, calculate_stability_score, coco_encode_rle, generate_crop_boxes, is_box_near_crop_edge, mask_to_rle_pytorch, remove_small_regions, rle_to_mask, uncrop_boxes_xyxy, uncrop_masks, uncrop_points) from .prompt_predictor import PromptPredictor from .sam import Sam from pycocotools import mask as mask_utils # type: ignore # noqa: F401 import numpy as np import torch import cv2 # type: ignore # noqa: F401
11298
return curr_anns def _generate_masks(self, image: np.ndarray) -> MaskData: orig_size = image.shape[:2] crop_boxes, layer_idxs = generate_crop_boxes(orig_size, self.crop_n_layers, self.crop_overlap_ratio) # Iterate over image crops data = MaskData() for crop_box, layer_idx in zip(crop_boxes, layer_idxs): crop_data = self._process_crop(image, crop_box, layer_idx, orig_size) data.cat(crop_data) # Remove duplicate masks between crops if len(crop_boxes) > 1: # Prefer masks from smaller crops scores = 1 / box_area(data['crop_boxes']) scores = scores.to(data['boxes'].device) keep_by_nms = batched_nms( data['boxes'].float(), scores, torch.zeros_like(data['boxes'][:, 0]), # categories iou_threshold=self.crop_nms_thresh, ) data.filter(keep_by_nms) data.to_numpy() return data def _process_crop( self, image: np.ndarray, crop_box: List[int], crop_layer_idx: int, orig_size: Tuple[int, ...], ) -> MaskData: # Crop the image and calculate embeddings x0, y0, x1, y1 = crop_box cropped_im = image[y0:y1, x0:x1, :] cropped_im_size = cropped_im.shape[:2] self.predictor.set_image(cropped_im) # Get points for this crop points_scale = np.array(cropped_im_size)[None, ::-1] points_for_image = self.point_grids[crop_layer_idx] * points_scale # Generate masks for this crop in batches data = MaskData() for (points, ) in batch_iterator(self.points_per_batch, points_for_image): batch_data = self._process_batch(points, cropped_im_size, crop_box, orig_size) data.cat(batch_data) del batch_data self.predictor.reset_image() # Remove duplicates within this crop. keep_by_nms = batched_nms( data['boxes'].float(), data['iou_preds'], torch.zeros_like(data['boxes'][:, 0]), # categories iou_threshold=self.box_nms_thresh, ) data.filter(keep_by_nms) # Return to the original image frame data['boxes'] = uncrop_boxes_xyxy(data['boxes'], crop_box) data['points'] = uncrop_points(data['points'], crop_box) data['crop_boxes'] = torch.tensor([crop_box for _ in range(len(data['rles']))]) return data def _process_batch( self, points: np.ndarray, im_size: Tuple[int, ...], crop_box: List[int], orig_size: Tuple[int, ...], ) -> MaskData: orig_h, orig_w = orig_size # Run model on this batch transformed_points = self.predictor.transform.apply_coords(points, im_size) in_points = torch.as_tensor(transformed_points, device=self.predictor.device) in_labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device) masks, iou_preds, _ = self.predictor.predict_torch( in_points[:, None, :], in_labels[:, None], multimask_output=True, return_logits=True, ) # Serialize predictions and store in MaskData data = MaskData( masks=masks.flatten(0, 1), iou_preds=iou_preds.flatten(0, 1), points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)), ) del masks # Filter by predicted IoU if self.pred_iou_thresh > 0.0: keep_mask = data['iou_preds'] > self.pred_iou_thresh data.filter(keep_mask) # Calculate stability score data['stability_score'] = calculate_stability_score(data['masks'], self.predictor.model.mask_threshold, self.stability_score_offset) if self.stability_score_thresh > 0.0: keep_mask = data['stability_score'] >= self.stability_score_thresh data.filter(keep_mask) # Threshold masks and calculate boxes data['masks'] = data['masks'] > self.predictor.model.mask_threshold data['boxes'] = batched_mask_to_box(data['masks']) # Filter boxes that touch crop boundaries keep_mask = ~is_box_near_crop_edge(data['boxes'], crop_box, [0, 0, orig_w, orig_h]) if not torch.all(keep_mask): data.filter(keep_mask) # Compress to RLE
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. class SamAutomaticMaskGenerator: def __init__( self, model: Sam, points_per_side: Optional[int] = 32, points_per_batch: int = 64, pred_iou_thresh: float = 0.88, stability_score_thresh: float = 0.95, stability_score_offset: float = 1.0, box_nms_thresh: float = 0.7, crop_n_layers: int = 0, crop_nms_thresh: float = 0.7, crop_overlap_ratio: float = 512 / 1500, crop_n_points_downscale_factor: int = 1, point_grids: Optional[List[np.ndarray]] = None, min_mask_region_area: int = 0, output_mode: str = 'binary_mask', ) -> None: """ Using a SAM model, generates masks for the entire image. Generates a grid of point prompts over the image, then filters low quality and duplicate masks. The default settings are chosen for SAM with a ViT-H backbone. Arguments: model (Sam): The SAM model to use for mask prediction. points_per_side (int, None): The number of points to be sampled along one side of the image. The total number of points is points_per_side**2. If None, 'point_grids' must provide explicit point sampling. points_per_batch (int): Sets the number of points run simultaneously by the model. Higher numbers may be faster but use more GPU memory. pred_iou_thresh (float): A filtering threshold in [0,1], using the model's predicted mask quality. stability_score_thresh (float): A filtering threshold in [0,1], using the stability of the mask under changes to the cutoff used to binarize the model's mask predictions. stability_score_offset (float): The amount to shift the cutoff when calculated the stability score. box_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks. crop_n_layers (int): If >0, mask prediction will be run again on crops of the image. Sets the number of layers to run, where each layer has 2**i_layer number of image crops. crop_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks between different crops. crop_overlap_ratio (float): Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of the image length. Later layers with more crops scale down this overlap. crop_n_points_downscale_factor (int): The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n. point_grids (list(np.ndarray), None): A list over explicit grids of points used for sampling, normalized to [0,1]. The nth grid in the list is used in the nth crop layer. Exclusive with points_per_side. min_mask_region_area (int): If >0, postprocessing will be applied to remove disconnected regions and holes in masks with area smaller than min_mask_region_area. Requires opencv. output_mode (str): The form masks are returned in. Can be 'binary_mask', 'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools. For large resolutions, 'binary_mask' may consume large amounts of memory. """ assert (points_per_side is None) != (point_grids is None), \ 'Exactly one of points_per_side or point_grid must be provided.' 
if points_per_side is not None: self.point_grids = build_all_layer_point_grids( points_per_side, crop_n_layers, crop_n_points_downscale_factor, ) elif point_grids is not None: self.point_grids = point_grids else: raise ValueError("Can't have both points_per_side and point_grid be None.") assert output_mode in {'binary_mask', 'uncompressed_rle', 'coco_rle'}, f'Unknown output_mode {output_mode}.' if output_mode == 'coco_rle': if min_mask_region_area > 0: self.predictor = PromptPredictor(model) self.points_per_batch = points_per_batch self.pred_iou_thresh = pred_iou_thresh self.stability_score_thresh = stability_score_thresh self.stability_score_offset = stability_score_offset self.box_nms_thresh = box_nms_thresh self.crop_n_layers = crop_n_layers self.crop_nms_thresh = crop_nms_thresh self.crop_overlap_ratio = crop_overlap_ratio self.crop_n_points_downscale_factor = crop_n_points_downscale_factor self.min_mask_region_area = min_mask_region_area self.output_mode = output_mode # TODO: Temporary implementation for compatibility def __call__(self, image: np.ndarray, augment=False, visualize=False) -> List[Dict[str, Any]]: return self.generate(image) @torch.no_grad() def generate(self, image: np.ndarray) -> List[Dict[str, Any]]: """ Generates masks for the given image. Arguments: image (np.ndarray): The image to generate masks for, in HWC uint8 format. Returns: list(dict(str, any)): A list over records for masks. Each record is a dict containing the following keys: segmentation (dict(str, any), np.ndarray): The mask. If output_mode='binary_mask', is an array of shape HW. Otherwise, is a dictionary containing the RLE. bbox (list(float)): The box around the mask, in XYWH format. area (int): The area in pixels of the mask. predicted_iou (float): The model's own prediction of the mask's quality. This is filtered by the pred_iou_thresh parameter. point_coords (list(list(float))): The point coordinates input to the model to generate this mask. stability_score (float): A measure of the mask's quality. This is filtered on using the stability_score_thresh parameter. crop_box (list(float)): The crop of the image used to generate the mask, given in XYWH format. 
""" # Generate masks mask_data = self._generate_masks(image) # Filter small disconnected regions and holes in masks if self.min_mask_region_area > 0: mask_data = self.postprocess_small_regions( mask_data, self.min_mask_region_area, max(self.box_nms_thresh, self.crop_nms_thresh), ) # Encode masks if self.output_mode == 'coco_rle': mask_data['segmentations'] = [coco_encode_rle(rle) for rle in mask_data['rles']] elif self.output_mode == 'binary_mask': mask_data['segmentations'] = [rle_to_mask(rle) for rle in mask_data['rles']] else: mask_data['segmentations'] = mask_data['rles'] # Write mask records curr_anns = [] for idx in range(len(mask_data['segmentations'])): ann = { 'segmentation': mask_data['segmentations'][idx], 'area': area_from_rle(mask_data['rles'][idx]), 'bbox': box_xyxy_to_xywh(mask_data['boxes'][idx]).tolist(), 'predicted_iou': mask_data['iou_preds'][idx].item(), 'point_coords': [mask_data['points'][idx].tolist()], 'stability_score': mask_data['stability_score'][idx].item(), 'crop_box': box_xyxy_to_xywh(mask_data['crop_boxes'][idx]).tolist(), } curr_anns.append(ann) return curr_anns def _generate_masks(self, image: np.ndarray) -> MaskData: orig_size = image.shape[:2] crop_boxes, layer_idxs = generate_crop_boxes(orig_size, self.crop_n_layers, self.crop_overlap_ratio) # Iterate over image crops data = MaskData() for crop_box, layer_idx in zip(crop_boxes, layer_idxs): crop_data = self._process_crop(image, crop_box, layer_idx, orig_size) data.cat(crop_data) # Remove duplicate masks between crops if len(crop_boxes) > 1: # Prefer masks from smaller crops scores = 1 / box_area(data['crop_boxes']) scores = scores.to(data['boxes'].device) keep_by_nms = batched_nms( data['boxes'].float(), scores, torch.zeros_like(data['boxes'][:, 0]), # categories iou_threshold=self.crop_nms_thresh, ) data.filter(keep_by_nms) data.to_numpy() return data def _process_crop( self, image: np.ndarray, crop_box: List[int], crop_layer_idx: int, orig_size: Tuple[int, ...], ) -> MaskData: # Crop the image and calculate embeddings x0, y0, x1, y1 = crop_box cropped_im = image[y0:y1, x0:x1, :] cropped_im_size = cropped_im.shape[:2] self.predictor.set_image(cropped_im) # Get points for this crop points_scale = np.array(cropped_im_size)[None, ::-1] points_for_image = self.point_grids[crop_layer_idx] * points_scale # Generate masks for this crop in batches data = MaskData() for (points, ) in batch_iterator(self.points_per_batch, points_for_image): batch_data = self._process_batch(points, cropped_im_size, crop_box, orig_size) data.cat(batch_data) del batch_data self.predictor.reset_image() # Remove duplicates within this crop. 
keep_by_nms = batched_nms( data['boxes'].float(), data['iou_preds'], torch.zeros_like(data['boxes'][:, 0]), # categories iou_threshold=self.box_nms_thresh, ) data.filter(keep_by_nms) # Return to the original image frame data['boxes'] = uncrop_boxes_xyxy(data['boxes'], crop_box) data['points'] = uncrop_points(data['points'], crop_box) data['crop_boxes'] = torch.tensor([crop_box for _ in range(len(data['rles']))]) return data def _process_batch( self, points: np.ndarray, im_size: Tuple[int, ...], crop_box: List[int], orig_size: Tuple[int, ...], ) -> MaskData: orig_h, orig_w = orig_size # Run model on this batch transformed_points = self.predictor.transform.apply_coords(points, im_size) in_points = torch.as_tensor(transformed_points, device=self.predictor.device) in_labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device) masks, iou_preds, _ = self.predictor.predict_torch( in_points[:, None, :], in_labels[:, None], multimask_output=True, return_logits=True, ) # Serialize predictions and store in MaskData data = MaskData( masks=masks.flatten(0, 1), iou_preds=iou_preds.flatten(0, 1), points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)), ) del masks # Filter by predicted IoU if self.pred_iou_thresh > 0.0: keep_mask = data['iou_preds'] > self.pred_iou_thresh data.filter(keep_mask) # Calculate stability score data['stability_score'] = calculate_stability_score(data['masks'], self.predictor.model.mask_threshold, self.stability_score_offset) if self.stability_score_thresh > 0.0: keep_mask = data['stability_score'] >= self.stability_score_thresh data.filter(keep_mask) # Threshold masks and calculate boxes data['masks'] = data['masks'] > self.predictor.model.mask_threshold data['boxes'] = batched_mask_to_box(data['masks']) # Filter boxes that touch crop boundaries keep_mask = ~is_box_near_crop_edge(data['boxes'], crop_box, [0, 0, orig_w, orig_h]) if not torch.all(keep_mask): data.filter(keep_mask) # Compress to RLE
data['masks'] = uncrop_masks(data['masks'], crop_box, orig_h, orig_w)
14
2023-10-24 00:45:55+00:00
16k
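Note on the record above: several of its amg.py snippets (mask_to_rle_pytorch, rle_to_mask, area_from_rle, coco_encode_rle) share one uncompressed RLE layout — the HxW mask is flattened in Fortran (column-major) order and stored as alternating run lengths that always begin with a background run, so the mask area is simply the sum of counts[1::2]. The following is a small numpy-only sketch of that layout; it is my own illustration, not the ultralytics code.

```python
# Numpy-only sketch of the uncompressed RLE layout used in the record above:
# flatten the mask in Fortran order, store alternating run lengths starting
# with a background (0) run, keep the original size as [h, w].
import numpy as np


def mask_to_rle(mask: np.ndarray) -> dict:
    h, w = mask.shape
    flat = mask.flatten(order="F").astype(np.uint8)
    counts, prev, run = [], 0, 0
    for v in flat:
        if v == prev:
            run += 1
        else:
            counts.append(run)  # close the previous run (may be 0 for a leading foreground pixel)
            prev, run = v, 1
    counts.append(run)
    return {"size": [h, w], "counts": counts}


def rle_to_mask(rle: dict) -> np.ndarray:
    h, w = rle["size"]
    flat = np.zeros(h * w, dtype=bool)
    idx, val = 0, False  # runs always start with background
    for c in rle["counts"]:
        flat[idx:idx + c] = val
        idx += c
        val = not val
    return flat.reshape((h, w), order="F")


mask = np.array([[0, 1, 1],
                 [0, 1, 0]], dtype=bool)
rle = mask_to_rle(mask)
print(rle)                        # {'size': [2, 3], 'counts': [2, 3, 1]}
print(sum(rle["counts"][1::2]))   # mask area: 3
assert (rle_to_mask(rle) == mask).all()
```

The round trip in the final assert mirrors what the record's rle_to_mask does with reshape(w, h) followed by transpose(), which is equivalent to a column-major reshape back to (h, w).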
bytedance/ColTrack
models/dino/dino.py
[ { "identifier": "box_ops", "path": "util/box_ops.py", "snippet": "def box_cxcywh_to_xyxy(x):\ndef box_xyxy_to_cxcywh(x):\ndef box_iou(boxes1, boxes2):\ndef generalized_box_iou(boxes1, boxes2):\ndef box_iou_pairwise(boxes1, boxes2):\ndef generalized_box_iou_pairwise(boxes1, boxes2):\ndef masks_to_boxes(masks):" }, { "identifier": "NestedTensor", "path": "util/misc.py", "snippet": "class NestedTensor(object):\n def __init__(self, tensors, mask: Optional[Tensor]):\n self.tensors = tensors\n self.mask = mask\n if mask == 'auto':\n self.mask = torch.zeros_like(tensors).to(tensors.device)\n if self.mask.dim() == 3:\n self.mask = self.mask.sum(0).to(bool)\n elif self.mask.dim() == 4:\n self.mask = self.mask.sum(1).to(bool)\n else:\n raise ValueError(\"tensors dim must be 3 or 4 but {}({})\".format(self.tensors.dim(), self.tensors.shape))\n\n def imgsize(self):\n res = []\n for i in range(self.tensors.shape[0]):\n mask = self.mask[i]\n maxH = (~mask).sum(0).max()\n maxW = (~mask).sum(1).max()\n res.append(torch.Tensor([maxH, maxW]))\n return res\n\n def to(self, device):\n # type: (Device) -> NestedTensor # noqa\n cast_tensor = self.tensors.to(device)\n mask = self.mask\n if mask is not None:\n assert mask is not None\n cast_mask = mask.to(device)\n else:\n cast_mask = None\n return NestedTensor(cast_tensor, cast_mask)\n\n def to_img_list_single(self, tensor, mask):\n assert tensor.dim() == 3, \"dim of tensor should be 3 but {}\".format(tensor.dim())\n maxH = (~mask).sum(0).max()\n maxW = (~mask).sum(1).max()\n img = tensor[:, :maxH, :maxW]\n return img\n\n def to_img_list(self):\n \"\"\"remove the padding and convert to img list\n\n Returns:\n [type]: [description]\n \"\"\"\n if self.tensors.dim() == 3:\n return self.to_img_list_single(self.tensors, self.mask)\n else:\n res = []\n for i in range(self.tensors.shape[0]):\n tensor_i = self.tensors[i]\n mask_i = self.mask[i]\n res.append(self.to_img_list_single(tensor_i, mask_i))\n return res\n\n @property\n def device(self):\n return self.tensors.device\n\n def decompose(self):\n return self.tensors, self.mask\n\n def __repr__(self):\n return str(self.tensors)\n\n @property\n def shape(self):\n return {\n 'tensors.shape': self.tensors.shape,\n 'mask.shape': self.mask.shape\n }" }, { "identifier": "nested_tensor_from_tensor_list", "path": "util/misc.py", "snippet": "def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):\n # TODO make this more general\n if tensor_list[0].ndim == 3:\n if torchvision._is_tracing():\n # nested_tensor_from_tensor_list() does not export well to ONNX\n # call _onnx_nested_tensor_from_tensor_list() instead\n return _onnx_nested_tensor_from_tensor_list(tensor_list)\n\n # TODO make it support different-sized images\n max_size = _max_by_axis([list(img.shape) for img in tensor_list])\n # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))\n batch_shape = [len(tensor_list)] + max_size\n b, c, h, w = batch_shape\n dtype = tensor_list[0].dtype\n device = tensor_list[0].device\n tensor = torch.zeros(batch_shape, dtype=dtype, device=device)\n mask = torch.ones((b, h, w), dtype=torch.bool, device=device)\n for img, pad_img, m in zip(tensor_list, tensor, mask):\n pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)\n m[: img.shape[1], :img.shape[2]] = False\n else:\n raise ValueError('not supported')\n return NestedTensor(tensor, mask)" }, { "identifier": "accuracy", "path": "util/misc.py", "snippet": "@torch.no_grad()\ndef accuracy(output, target, topk=(1,)):\n \"\"\"Computes the 
precision@k for the specified values of k\"\"\"\n if target.numel() == 0:\n return [torch.zeros([], device=output.device)]\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res" }, { "identifier": "get_world_size", "path": "util/misc.py", "snippet": "def get_world_size():\n if not is_dist_avail_and_initialized():\n return 1\n return dist.get_world_size()" }, { "identifier": "interpolate", "path": "util/misc.py", "snippet": "def interpolate(input, size=None, scale_factor=None, mode=\"nearest\", align_corners=None):\n # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor\n \"\"\"\n Equivalent to nn.functional.interpolate, but with support for empty batch sizes.\n This will eventually be supported natively by PyTorch, and this\n class can go away.\n \"\"\"\n if __torchvision_need_compat_flag < 0.7:\n if input.numel() > 0:\n return torch.nn.functional.interpolate(\n input, size, scale_factor, mode, align_corners\n )\n\n output_shape = _output_size(2, input, size, scale_factor)\n output_shape = list(input.shape[:-2]) + list(output_shape)\n return _new_empty_tensor(input, output_shape)\n else:\n return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners)" }, { "identifier": "is_dist_avail_and_initialized", "path": "util/misc.py", "snippet": "def is_dist_avail_and_initialized():\n if not dist.is_available():\n return False\n if not dist.is_initialized():\n return False\n return True" }, { "identifier": "inverse_sigmoid", "path": "util/misc.py", "snippet": "def inverse_sigmoid(x, eps=1e-3):\n if x.shape[-1] == 4:\n x = torch.cat(((x[..., :2] + 1) / 3, x[..., 2:] / 2), dim=-1)\n elif x.shape[-1] == 2:\n x = (x + 1) / 3\n else:\n raise ValueError\n x = x.clamp(min=0, max=1)\n x1 = x.clamp(min=eps)\n x2 = (1 - x).clamp(min=eps)\n return torch.log(x1/x2)" }, { "identifier": "scale_sigmoid", "path": "util/misc.py", "snippet": "def scale_sigmoid(x, eps=1e-3):\n if x.shape[-1] == 4:\n x = torch.cat((3 * (x[..., :2]) - 1, x[..., 2:] * 2), dim=-1)\n elif x.shape[-1] == 2:\n x = 3 * x - 1\n else:\n raise ValueError\n return x" }, { "identifier": "build_backbone", "path": "models/dino/backbone.py", "snippet": "def build_backbone(args):\n \"\"\"\n Useful args:\n - backbone: backbone name\n - lr_backbone: \n - dilation\n - return_interm_indices: available: [0,1,2,3], [1,2,3], [3]\n - backbone_freeze_keywords: \n - use_checkpoint: for swin only for now\n\n \"\"\"\n position_embedding = build_position_encoding(args)\n train_backbone = args.lr_backbone > 0\n if not train_backbone:\n raise ValueError(\"Please set lr_backbone > 0\")\n return_interm_indices = args.return_interm_indices\n assert return_interm_indices in [[0,1,2,3], [1,2,3], [3]]\n backbone_freeze_keywords = args.backbone_freeze_keywords\n use_checkpoint = getattr(args, 'use_checkpoint', False)\n\n if args.backbone in ['resnet50', 'resnet101']:\n backbone = Backbone(args.backbone, train_backbone, args.dilation, \n return_interm_indices, \n batch_norm=FrozenBatchNorm2d)\n bb_num_channels = backbone.num_channels\n elif args.backbone in ['swin_T_224_1k', 'swin_B_224_22k', 'swin_B_384_22k', 'swin_L_224_22k', 'swin_L_384_22k']:\n pretrain_img_size = int(args.backbone.split('_')[-2])\n backbone = build_swin_transformer(args.backbone, \\\n 
pretrain_img_size=pretrain_img_size, \\\n out_indices=tuple(return_interm_indices), \\\n dilation=args.dilation, use_checkpoint=use_checkpoint)\n\n # freeze some layers\n if backbone_freeze_keywords is not None:\n for name, parameter in backbone.named_parameters():\n for keyword in backbone_freeze_keywords:\n if keyword in name:\n parameter.requires_grad_(False)\n break\n\n pretrained_dir = args.backbone_dir\n PTDICT = {\n 'swin_T_224_1k': 'swin_tiny_patch4_window7_224.pth',\n 'swin_B_384_22k': 'swin_base_patch4_window12_384.pth',\n 'swin_L_384_22k': 'swin_large_patch4_window12_384_22k.pth',\n }\n pretrainedpath = os.path.join(pretrained_dir, PTDICT[args.backbone])\n checkpoint = torch.load(pretrainedpath, map_location='cpu')['model']\n from collections import OrderedDict\n def key_select_function(keyname):\n if 'head' in keyname:\n return False\n if args.dilation and 'layers.3' in keyname:\n return False\n return True\n _tmp_st = OrderedDict({k:v for k, v in clean_state_dict(checkpoint).items() if key_select_function(k)})\n _tmp_st_output = backbone.load_state_dict(_tmp_st, strict=False)\n print(str(_tmp_st_output))\n bb_num_channels = backbone.num_features[4 - len(return_interm_indices):]\n elif args.backbone in ['convnext_xlarge_22k']:\n backbone = build_convnext(modelname=args.backbone, pretrained=True, out_indices=tuple(return_interm_indices),backbone_dir=args.backbone_dir)\n bb_num_channels = backbone.dims[4 - len(return_interm_indices):]\n else:\n raise NotImplementedError(\"Unknown backbone {}\".format(args.backbone))\n \n\n assert len(bb_num_channels) == len(return_interm_indices), f\"len(bb_num_channels) {len(bb_num_channels)} != len(return_interm_indices) {len(return_interm_indices)}\"\n\n\n model = Joiner(backbone, position_embedding)\n model.num_channels = bb_num_channels \n assert isinstance(bb_num_channels, List), \"bb_num_channels is expected to be a List but {}\".format(type(bb_num_channels))\n return model" }, { "identifier": "build_matcher", "path": "models/dino/matcher.py", "snippet": "def build_matcher(args):\n assert args.matcher_type in ['HungarianMatcher', 'SimpleMinsumMatcher'], \"Unknown args.matcher_type: {}\".format(args.matcher_type)\n if args.matcher_type == 'HungarianMatcher':\n return HungarianMatcher(\n cost_class=args.set_cost_class, cost_bbox=args.set_cost_bbox, cost_giou=args.set_cost_giou,\n focal_alpha=args.focal_alpha\n )\n elif args.matcher_type == 'SimpleMinsumMatcher':\n return SimpleMinsumMatcher(\n cost_class=args.set_cost_class, cost_bbox=args.set_cost_bbox, cost_giou=args.set_cost_giou,\n focal_alpha=args.focal_alpha\n ) \n else:\n raise NotImplementedError(\"Unknown args.matcher_type: {}\".format(args.matcher_type))" }, { "identifier": "DETRsegm", "path": "models/dino/segmentation.py", "snippet": "class DETRsegm(nn.Module):\n def __init__(self, detr, freeze_detr=False):\n super().__init__()\n self.detr = detr\n\n if freeze_detr:\n for p in self.parameters():\n p.requires_grad_(False)\n\n hidden_dim, nheads = detr.transformer.d_model, detr.transformer.nhead\n self.bbox_attention = MHAttentionMap(hidden_dim, hidden_dim, nheads, dropout=0.0)\n self.mask_head = MaskHeadSmallConv(hidden_dim + nheads, [1024, 512, 256], hidden_dim)\n\n def forward(self, samples: NestedTensor):\n if isinstance(samples, (list, torch.Tensor)):\n samples = nested_tensor_from_tensor_list(samples)\n features, pos = self.detr.backbone(samples)\n\n bs = features[-1].tensors.shape[0]\n\n src, mask = features[-1].decompose()\n assert mask is not None\n src_proj = 
self.detr.input_proj(src)\n hs, memory = self.detr.transformer(src_proj, mask, self.detr.query_embed.weight, pos[-1])\n\n outputs_class = self.detr.class_embed(hs)\n outputs_coord = self.detr.bbox_embed(hs).sigmoid()\n out = {\"pred_logits\": outputs_class[-1], \"pred_boxes\": outputs_coord[-1]}\n if self.detr.aux_loss:\n out['aux_outputs'] = self.detr._set_aux_loss(outputs_class, outputs_coord)\n\n # FIXME h_boxes takes the last one computed, keep this in mind\n bbox_mask = self.bbox_attention(hs[-1], memory, mask=mask)\n\n seg_masks = self.mask_head(src_proj, bbox_mask, [features[2].tensors, features[1].tensors, features[0].tensors])\n outputs_seg_masks = seg_masks.view(bs, self.detr.num_queries, seg_masks.shape[-2], seg_masks.shape[-1])\n\n out[\"pred_masks\"] = outputs_seg_masks\n return out" }, { "identifier": "PostProcessPanoptic", "path": "models/dino/segmentation.py", "snippet": "class PostProcessPanoptic(nn.Module):\n \"\"\"This class converts the output of the model to the final panoptic result, in the format expected by the\n coco panoptic API \"\"\"\n\n def __init__(self, is_thing_map, threshold=0.85):\n \"\"\"\n Parameters:\n is_thing_map: This is a whose keys are the class ids, and the values a boolean indicating whether\n the class is a thing (True) or a stuff (False) class\n threshold: confidence threshold: segments with confidence lower than this will be deleted\n \"\"\"\n super().__init__()\n self.threshold = threshold\n self.is_thing_map = is_thing_map\n\n def forward(self, outputs, processed_sizes, target_sizes=None):\n \"\"\" This function computes the panoptic prediction from the model's predictions.\n Parameters:\n outputs: This is a dict coming directly from the model. See the model doc for the content.\n processed_sizes: This is a list of tuples (or torch tensors) of sizes of the images that were passed to the\n model, ie the size after data augmentation but before batching.\n target_sizes: This is a list of tuples (or torch tensors) corresponding to the requested final size\n of each prediction. 
If left to None, it will default to the processed_sizes\n \"\"\"\n if target_sizes is None:\n target_sizes = processed_sizes\n assert len(processed_sizes) == len(target_sizes)\n out_logits, raw_masks, raw_boxes = outputs[\"pred_logits\"], outputs[\"pred_masks\"], outputs[\"pred_boxes\"]\n assert len(out_logits) == len(raw_masks) == len(target_sizes)\n preds = []\n\n def to_tuple(tup):\n if isinstance(tup, tuple):\n return tup\n return tuple(tup.cpu().tolist())\n\n for cur_logits, cur_masks, cur_boxes, size, target_size in zip(\n out_logits, raw_masks, raw_boxes, processed_sizes, target_sizes\n ):\n # we filter empty queries and detection below threshold\n scores, labels = cur_logits.softmax(-1).max(-1)\n keep = labels.ne(outputs[\"pred_logits\"].shape[-1] - 1) & (scores > self.threshold)\n cur_scores, cur_classes = cur_logits.softmax(-1).max(-1)\n cur_scores = cur_scores[keep]\n cur_classes = cur_classes[keep]\n cur_masks = cur_masks[keep]\n cur_masks = interpolate(cur_masks[:, None], to_tuple(size), mode=\"bilinear\").squeeze(1)\n cur_boxes = box_ops.box_cxcywh_to_xyxy(cur_boxes[keep])\n\n h, w = cur_masks.shape[-2:]\n assert len(cur_boxes) == len(cur_classes)\n\n # It may be that we have several predicted masks for the same stuff class.\n # In the following, we track the list of masks ids for each stuff class (they are merged later on)\n cur_masks = cur_masks.flatten(1)\n stuff_equiv_classes = defaultdict(lambda: [])\n for k, label in enumerate(cur_classes):\n if not self.is_thing_map[label.item()]:\n stuff_equiv_classes[label.item()].append(k)\n\n def get_ids_area(masks, scores, dedup=False):\n # This helper function creates the final panoptic segmentation image\n # It also returns the area of the masks that appears on the image\n\n m_id = masks.transpose(0, 1).softmax(-1)\n\n if m_id.shape[-1] == 0:\n # We didn't detect any mask :(\n m_id = torch.zeros((h, w), dtype=torch.long, device=m_id.device)\n else:\n m_id = m_id.argmax(-1).view(h, w)\n\n if dedup:\n # Merge the masks corresponding to the same stuff class\n for equiv in stuff_equiv_classes.values():\n if len(equiv) > 1:\n for eq_id in equiv:\n m_id.masked_fill_(m_id.eq(eq_id), equiv[0])\n\n final_h, final_w = to_tuple(target_size)\n\n seg_img = Image.fromarray(id2rgb(m_id.view(h, w).cpu().numpy()))\n seg_img = seg_img.resize(size=(final_w, final_h), resample=Image.NEAREST)\n\n np_seg_img = (\n torch.ByteTensor(torch.ByteStorage.from_buffer(seg_img.tobytes())).view(final_h, final_w, 3).numpy()\n )\n m_id = torch.from_numpy(rgb2id(np_seg_img))\n\n area = []\n for i in range(len(scores)):\n area.append(m_id.eq(i).sum().item())\n return area, seg_img\n\n area, seg_img = get_ids_area(cur_masks, cur_scores, dedup=True)\n if cur_classes.numel() > 0:\n # We know filter empty masks as long as we find some\n while True:\n filtered_small = torch.as_tensor(\n [area[i] <= 4 for i, c in enumerate(cur_classes)], dtype=torch.bool, device=keep.device\n )\n if filtered_small.any().item():\n cur_scores = cur_scores[~filtered_small]\n cur_classes = cur_classes[~filtered_small]\n cur_masks = cur_masks[~filtered_small]\n area, seg_img = get_ids_area(cur_masks, cur_scores)\n else:\n break\n\n else:\n cur_classes = torch.ones(1, dtype=torch.long, device=cur_classes.device)\n\n segments_info = []\n for i, a in enumerate(area):\n cat = cur_classes[i].item()\n segments_info.append({\"id\": i, \"isthing\": self.is_thing_map[cat], \"category_id\": cat, \"area\": a})\n del cur_classes\n\n with io.BytesIO() as out:\n seg_img.save(out, format=\"PNG\")\n 
predictions = {\"png_string\": out.getvalue(), \"segments_info\": segments_info}\n preds.append(predictions)\n return preds" }, { "identifier": "PostProcessSegm", "path": "models/dino/segmentation.py", "snippet": "class PostProcessSegm(nn.Module):\n def __init__(self, threshold=0.5):\n super().__init__()\n self.threshold = threshold\n\n @torch.no_grad()\n def forward(self, results, outputs, orig_target_sizes, max_target_sizes):\n assert len(orig_target_sizes) == len(max_target_sizes)\n max_h, max_w = max_target_sizes.max(0)[0].tolist()\n outputs_masks = outputs[\"pred_masks\"].squeeze(2)\n outputs_masks = F.interpolate(outputs_masks, size=(max_h, max_w), mode=\"bilinear\", align_corners=False)\n outputs_masks = (outputs_masks.sigmoid() > self.threshold).cpu()\n\n for i, (cur_mask, t, tt) in enumerate(zip(outputs_masks, max_target_sizes, orig_target_sizes)):\n img_h, img_w = t[0], t[1]\n results[i][\"masks\"] = cur_mask[:, :img_h, :img_w].unsqueeze(1)\n results[i][\"masks\"] = F.interpolate(\n results[i][\"masks\"].float(), size=tuple(tt.tolist()), mode=\"nearest\"\n ).byte()\n\n return results" }, { "identifier": "dice_loss", "path": "models/dino/segmentation.py", "snippet": "def dice_loss(inputs, targets, num_boxes):\n \"\"\"\n Compute the DICE loss, similar to generalized IOU for masks\n Args:\n inputs: A float tensor of arbitrary shape.\n The predictions for each example.\n targets: A float tensor with the same shape as inputs. Stores the binary\n classification label for each element in inputs\n (0 for the negative class and 1 for the positive class).\n \"\"\"\n inputs = inputs.sigmoid()\n inputs = inputs.flatten(1)\n numerator = 2 * (inputs * targets).sum(1)\n denominator = inputs.sum(-1) + targets.sum(-1)\n loss = 1 - (numerator + 1) / (denominator + 1)\n return loss.sum() / num_boxes" }, { "identifier": "build_deformable_transformer", "path": "models/dino/deformable_transformer.py", "snippet": "def build_deformable_transformer(args):\n decoder_query_perturber = None\n if args.decoder_layer_noise:\n from .utils import RandomBoxPerturber\n decoder_query_perturber=RandomBoxPerturber(\n x_noise_scale=args.dln_xy_noise, y_noise_scale=args.dln_xy_noise, \n w_noise_scale=args.dln_hw_noise, h_noise_scale=args.dln_hw_noise)\n\n use_detached_boxes_dec_out = False\n try:\n use_detached_boxes_dec_out = args.use_detached_boxes_dec_out\n except:\n use_detached_boxes_dec_out =False\n\n return DeformableTransformer(\n d_model=args.hidden_dim,\n dropout=args.dropout,\n nhead=args.nheads,\n num_queries=args.num_queries,\n dim_feedforward=args.dim_feedforward,\n num_encoder_layers=args.enc_layers,\n num_unicoder_layers=args.unic_layers,\n num_decoder_layers=args.dec_layers,\n normalize_before=args.pre_norm,\n return_intermediate_dec=True,\n query_dim=args.query_dim,\n activation=args.transformer_activation,\n num_patterns=args.num_patterns,\n modulate_hw_attn=True,\n\n deformable_encoder=True,\n deformable_decoder=True,\n num_feature_levels=args.num_feature_levels,\n enc_n_points=args.enc_n_points,\n dec_n_points=args.dec_n_points,\n use_deformable_box_attn=args.use_deformable_box_attn,\n box_attn_type=args.box_attn_type,\n\n learnable_tgt_init=True,\n decoder_query_perturber=decoder_query_perturber,\n\n add_channel_attention=args.add_channel_attention,\n add_pos_value=args.add_pos_value,\n random_refpoints_xy=args.random_refpoints_xy,\n\n # two stage\n two_stage_type=args.two_stage_type, # ['no', 'standard', 'early']\n two_stage_pat_embed=args.two_stage_pat_embed,\n 
two_stage_add_query_num=args.two_stage_add_query_num,\n two_stage_learn_wh=args.two_stage_learn_wh,\n two_stage_keep_all_tokens=args.two_stage_keep_all_tokens,\n dec_layer_number=args.dec_layer_number,\n rm_self_attn_layers=None,\n key_aware_type=None,\n layer_share_type=None,\n\n rm_detach=None,\n decoder_sa_type=args.decoder_sa_type,\n module_seq=args.decoder_module_seq,\n\n embed_init_tgt=args.embed_init_tgt,\n use_detached_boxes_dec_out=use_detached_boxes_dec_out\n )" }, { "identifier": "sigmoid_focal_loss", "path": "models/dino/utils.py", "snippet": "def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2):\n \"\"\"\n Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.\n Args:\n inputs: A float tensor of arbitrary shape.\n The predictions for each example.\n targets: A float tensor with the same shape as inputs. Stores the binary\n classification label for each element in inputs\n (0 for the negative class and 1 for the positive class).\n alpha: (optional) Weighting factor in range (0,1) to balance\n positive vs negative examples. Default = -1 (no weighting).\n gamma: Exponent of the modulating factor (1 - p_t) to\n balance easy vs hard examples.\n Returns:\n Loss tensor\n \"\"\"\n prob = inputs.sigmoid()\n ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction=\"none\")\n p_t = prob * targets + (1 - prob) * (1 - targets)\n loss = ce_loss * ((1 - p_t) ** gamma)\n\n if alpha >= 0:\n alpha_t = alpha * targets + (1 - alpha) * (1 - targets)\n loss = alpha_t * loss\n\n\n return loss.mean(1).sum() / num_boxes" }, { "identifier": "MLP", "path": "models/dino/utils.py", "snippet": "class MLP(nn.Module):\n \"\"\" Very simple multi-layer perceptron (also called FFN)\"\"\"\n\n def __init__(self, input_dim, hidden_dim, output_dim, num_layers):\n super().__init__()\n self.num_layers = num_layers\n h = [hidden_dim] * (num_layers - 1)\n self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))\n\n def forward(self, x):\n for i, layer in enumerate(self.layers):\n x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)\n return x" }, { "identifier": "MODULE_BUILD_FUNCS", "path": "models/registry.py", "snippet": "MODULE_BUILD_FUNCS = Registry('model build functions')" }, { "identifier": "prepare_for_cdn", "path": "models/dino/dn_components.py", "snippet": "def prepare_for_cdn(dn_args, training, num_queries, num_classes, hidden_dim, label_enc):\n \"\"\"\n A major difference of DINO from DN-DETR is that the author process pattern embedding pattern embedding in its detector\n forward function and use learnable tgt embedding, so we change this function a little bit.\n :param dn_args: targets, dn_number, label_noise_ratio, box_noise_scale\n :param training: if it is training or inference\n :param num_queries: number of queires\n :param num_classes: number of classes\n :param hidden_dim: transformer hidden dim\n :param label_enc: encode labels in dn\n :return:\n \"\"\"\n if training:\n targets, dn_number, label_noise_ratio, box_noise_scale = dn_args\n # positive and negative dn queries\n dn_number = dn_number * 2\n known = [(torch.ones_like(t['labels'])).cuda() for t in targets]\n batch_size = len(known)\n known_num = [sum(k) for k in known]\n if int(max(known_num)) == 0:\n dn_number = 1\n else:\n if dn_number >= 100:\n dn_number = dn_number // (int(max(known_num) * 2))\n elif dn_number < 1:\n dn_number = 1\n if dn_number == 0:\n dn_number = 1\n unmask_bbox = unmask_label = 
torch.cat(known)\n labels = torch.cat([t['labels'] for t in targets])\n boxes = torch.cat([t['boxes'] for t in targets])\n batch_idx = torch.cat([torch.full_like(t['labels'].long(), i) for i, t in enumerate(targets)])\n\n known_indice = torch.nonzero(unmask_label + unmask_bbox)\n known_indice = known_indice.view(-1)\n\n known_indice = known_indice.repeat(2 * dn_number, 1).view(-1)\n known_labels = labels.repeat(2 * dn_number, 1).view(-1)\n known_bid = batch_idx.repeat(2 * dn_number, 1).view(-1)\n known_bboxs = boxes.repeat(2 * dn_number, 1)\n known_labels_expaned = known_labels.clone()\n known_bbox_expand = known_bboxs.clone()\n\n if label_noise_ratio > 0:\n p = torch.rand_like(known_labels_expaned.float())\n chosen_indice = torch.nonzero(p < (label_noise_ratio * 0.5)).view(-1) # half of bbox prob\n new_label = torch.randint_like(chosen_indice, 0, num_classes) # randomly put a new one here\n known_labels_expaned.scatter_(0, chosen_indice, new_label)\n single_pad = int(max(known_num))\n\n pad_size = int(single_pad * 2 * dn_number)\n positive_idx = torch.tensor(range(len(boxes))).long().cuda().unsqueeze(0).repeat(dn_number, 1)\n positive_idx += (torch.tensor(range(dn_number)) * len(boxes) * 2).long().cuda().unsqueeze(1)\n positive_idx = positive_idx.flatten()\n negative_idx = positive_idx + len(boxes)\n if box_noise_scale > 0:\n known_bbox_ = torch.zeros_like(known_bboxs)\n known_bbox_[:, :2] = known_bboxs[:, :2] - known_bboxs[:, 2:] / 2\n known_bbox_[:, 2:] = known_bboxs[:, :2] + known_bboxs[:, 2:] / 2\n\n diff = torch.zeros_like(known_bboxs)\n diff[:, :2] = known_bboxs[:, 2:] / 2\n diff[:, 2:] = known_bboxs[:, 2:] / 2\n\n rand_sign = torch.randint_like(known_bboxs, low=0, high=2, dtype=torch.float32) * 2.0 - 1.0\n rand_part = torch.rand_like(known_bboxs)\n rand_part[negative_idx] += 1.0\n rand_part *= rand_sign\n known_bbox_ = known_bbox_ + torch.mul(rand_part,\n diff).cuda() * box_noise_scale\n # known_bbox_ = known_bbox_.clamp(min=0.0, max=1.0)\n known_bbox_expand[:, :2] = (known_bbox_[:, :2] + known_bbox_[:, 2:]) / 2\n known_bbox_expand[:, 2:] = known_bbox_[:, 2:] - known_bbox_[:, :2]\n\n m = known_labels_expaned.long().to('cuda')\n input_label_embed = label_enc(m)\n input_bbox_embed = inverse_sigmoid(known_bbox_expand)\n\n padding_label = torch.zeros(pad_size, hidden_dim).cuda()\n padding_bbox = torch.zeros(pad_size, 4).cuda()\n\n input_query_label = padding_label.repeat(batch_size, 1, 1)\n input_query_bbox = padding_bbox.repeat(batch_size, 1, 1)\n\n map_known_indice = torch.tensor([]).to('cuda')\n if len(known_num):\n map_known_indice = torch.cat([torch.tensor(range(num)) for num in known_num]) # [1,2, 1,2,3]\n map_known_indice = torch.cat([map_known_indice + single_pad * i for i in range(2 * dn_number)]).long()\n if len(known_bid):\n input_query_label[(known_bid.long(), map_known_indice)] = input_label_embed\n input_query_bbox[(known_bid.long(), map_known_indice)] = input_bbox_embed\n\n tgt_size = pad_size + num_queries\n attn_mask = torch.ones(tgt_size, tgt_size).to('cuda') < 0\n # match query cannot see the reconstruct\n attn_mask[pad_size:, :pad_size] = True\n # reconstruct cannot see each other\n for i in range(dn_number):\n if i == 0:\n attn_mask[single_pad * 2 * i:single_pad * 2 * (i + 1), single_pad * 2 * (i + 1):pad_size] = True\n if i == dn_number - 1:\n attn_mask[single_pad * 2 * i:single_pad * 2 * (i + 1), :single_pad * i * 2] = True\n else:\n attn_mask[single_pad * 2 * i:single_pad * 2 * (i + 1), single_pad * 2 * (i + 1):pad_size] = True\n attn_mask[single_pad * 2 * 
i:single_pad * 2 * (i + 1), :single_pad * 2 * i] = True\n\n dn_meta = {\n 'pad_size': pad_size,\n 'num_dn_group': dn_number,\n }\n else:\n\n input_query_label = None\n input_query_bbox = None\n attn_mask = None\n dn_meta = None\n\n return input_query_label, input_query_bbox, attn_mask, dn_meta" }, { "identifier": "dn_post_process", "path": "models/dino/dn_components.py", "snippet": "def dn_post_process(outputs_class, outputs_coord, dn_meta, aux_loss, _set_aux_loss):\n \"\"\"\n post process of dn after output from the transformer\n put the dn part in the dn_meta\n \"\"\"\n if dn_meta and dn_meta['pad_size'] > 0:\n output_known_class = outputs_class[:, :, :dn_meta['pad_size'], :]\n output_known_coord = outputs_coord[:, :, :dn_meta['pad_size'], :]\n outputs_class = outputs_class[:, :, dn_meta['pad_size']:, :]\n outputs_coord = outputs_coord[:, :, dn_meta['pad_size']:, :]\n out = {'pred_logits': output_known_class[-1], 'pred_boxes': output_known_coord[-1]}\n if aux_loss:\n out['aux_outputs'] = _set_aux_loss(output_known_class, output_known_coord)\n dn_meta['output_known_lbs_bboxes'] = out\n return outputs_class, outputs_coord" } ]
import copy import math import torch import torch.nn.functional as F from typing import List from torch import nn from torchvision.ops.boxes import nms from util import box_ops from util.misc import (NestedTensor, nested_tensor_from_tensor_list, accuracy, get_world_size, interpolate, is_dist_avail_and_initialized, inverse_sigmoid, scale_sigmoid) from .backbone import build_backbone from .matcher import build_matcher from .segmentation import (DETRsegm, PostProcessPanoptic, PostProcessSegm, dice_loss) from .deformable_transformer import build_deformable_transformer from .utils import sigmoid_focal_loss, MLP from ..registry import MODULE_BUILD_FUNCS from .dn_components import prepare_for_cdn,dn_post_process
11,436
# is the maximum id for a class in your dataset. For example, # COCO has a max_obj_id of 90, so we pass `num_classes` to be 91. # As another example, for a dataset that has a single class with id 1, # you should pass `num_classes` to be 2 (max_obj_id + 1). # For more details on this, check the following discussion # https://github.com/facebookresearch/detr/issues/108#issuecomment-650269223 # num_classes = 20 if args.dataset_file != 'coco' else 91 # if args.dataset_file == "coco_panoptic": # # for panoptic, we just add a num_classes that is large enough to hold # # max_obj_id + 1, but the exact value doesn't really matter # num_classes = 250 # if args.dataset_file == 'o365': # num_classes = 366 # if args.dataset_file == 'vanke': # num_classes = 51 num_classes = args.num_classes device = torch.device(args.device) backbone = build_backbone(args) transformer = build_deformable_transformer(args) try: match_unstable_error = args.match_unstable_error dn_labelbook_size = args.dn_labelbook_size except: match_unstable_error = True dn_labelbook_size = num_classes try: dec_pred_class_embed_share = args.dec_pred_class_embed_share except: dec_pred_class_embed_share = True try: dec_pred_bbox_embed_share = args.dec_pred_bbox_embed_share except: dec_pred_bbox_embed_share = True model = DINO( backbone, transformer, num_classes=num_classes, num_queries=args.num_queries, aux_loss=True, iter_update=True, query_dim=4, random_refpoints_xy=args.random_refpoints_xy, fix_refpoints_hw=args.fix_refpoints_hw, num_feature_levels=args.num_feature_levels, nheads=args.nheads, dec_pred_class_embed_share=dec_pred_class_embed_share, dec_pred_bbox_embed_share=dec_pred_bbox_embed_share, # two stage two_stage_type=args.two_stage_type, # box_share two_stage_bbox_embed_share=args.two_stage_bbox_embed_share, two_stage_class_embed_share=args.two_stage_class_embed_share, decoder_sa_type=args.decoder_sa_type, num_patterns=args.num_patterns, dn_number = args.dn_number if args.use_dn else 0, dn_box_noise_scale = args.dn_box_noise_scale, dn_label_noise_ratio = args.dn_label_noise_ratio, dn_labelbook_size = dn_labelbook_size, ) if args.masks: model = DETRsegm(model, freeze_detr=(args.frozen_weights is not None)) matcher = build_matcher(args) # prepare weight dict weight_dict = {'loss_ce': args.cls_loss_coef, 'loss_bbox': args.bbox_loss_coef} weight_dict['loss_giou'] = args.giou_loss_coef clean_weight_dict_wo_dn = copy.deepcopy(weight_dict) # for DN training if args.use_dn: weight_dict['loss_ce_dn'] = args.cls_loss_coef weight_dict['loss_bbox_dn'] = args.bbox_loss_coef weight_dict['loss_giou_dn'] = args.giou_loss_coef if args.masks: weight_dict["loss_mask"] = args.mask_loss_coef weight_dict["loss_dice"] = args.dice_loss_coef clean_weight_dict = copy.deepcopy(weight_dict) # TODO this is a hack if args.aux_loss: aux_weight_dict = {} for i in range(args.dec_layers - 1): aux_weight_dict.update({k + f'_{i}': v for k, v in clean_weight_dict.items()}) weight_dict.update(aux_weight_dict) if args.two_stage_type != 'no': interm_weight_dict = {} try: no_interm_box_loss = args.no_interm_box_loss except: no_interm_box_loss = False _coeff_weight_dict = { 'loss_ce': 1.0, 'loss_bbox': 1.0 if not no_interm_box_loss else 0.0, 'loss_giou': 1.0 if not no_interm_box_loss else 0.0, } try: interm_loss_coef = args.interm_loss_coef except: interm_loss_coef = 1.0 interm_weight_dict.update({k + f'_interm': v * interm_loss_coef * _coeff_weight_dict[k] for k, v in clean_weight_dict_wo_dn.items()}) weight_dict.update(interm_weight_dict) losses = ['labels', 'boxes', 
'cardinality'] if args.masks: losses += ["masks"] criterion = SetCriterion(num_classes, matcher=matcher, weight_dict=weight_dict, focal_alpha=args.focal_alpha, losses=losses, ) criterion.to(device) postprocessors = {'bbox': PostProcess(num_select=args.num_select, nms_iou_threshold=args.nms_iou_threshold)} if args.masks:
# ------------------------------------------------------------------------ # DINO # Copyright (c) 2022 IDEA. All Rights Reserved. # Licensed under the Apache License, Version 2.0 [see LICENSE for details] # ------------------------------------------------------------------------ # Conditional DETR model and criterion classes. # Copyright (c) 2021 Microsoft. All Rights Reserved. # Licensed under the Apache License, Version 2.0 [see LICENSE for details] # ------------------------------------------------------------------------ # Modified from DETR (https://github.com/facebookresearch/detr) # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. # ------------------------------------------------------------------------ # Modified from Deformable DETR (https://github.com/fundamentalvision/Deformable-DETR) # Copyright (c) 2020 SenseTime. All Rights Reserved. # ------------------------------------------------------------------------ class DINO(nn.Module): """ This is the Cross-Attention Detector module that performs object detection """ def __init__(self, backbone, transformer, num_classes, num_queries, aux_loss=False, iter_update=False, query_dim=2, random_refpoints_xy=False, fix_refpoints_hw=-1, num_feature_levels=1, nheads=8, # two stage two_stage_type='no', # ['no', 'standard'] two_stage_add_query_num=0, dec_pred_class_embed_share=True, dec_pred_bbox_embed_share=True, two_stage_class_embed_share=True, two_stage_bbox_embed_share=True, decoder_sa_type = 'sa', num_patterns = 0, dn_number = 100, dn_box_noise_scale = 0.4, dn_label_noise_ratio = 0.5, dn_labelbook_size = 100, ): """ Initializes the model. Parameters: backbone: torch module of the backbone to be used. See backbone.py transformer: torch module of the transformer architecture. See transformer.py num_classes: number of object classes num_queries: number of object queries, ie detection slot. This is the maximal number of objects Conditional DETR can detect in a single image. For COCO, we recommend 100 queries. aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used. 
fix_refpoints_hw: -1(default): learn w and h for each box seperately >0 : given fixed number -2 : learn a shared w and h """ super().__init__() self.num_queries = num_queries self.transformer = transformer self.num_classes = num_classes self.hidden_dim = hidden_dim = transformer.d_model self.num_feature_levels = num_feature_levels self.nheads = nheads self.label_enc = nn.Embedding(dn_labelbook_size + 1, hidden_dim) # setting query dim self.query_dim = query_dim assert query_dim == 4 self.random_refpoints_xy = random_refpoints_xy self.fix_refpoints_hw = fix_refpoints_hw # for dn training self.num_patterns = num_patterns self.dn_number = dn_number self.dn_box_noise_scale = dn_box_noise_scale self.dn_label_noise_ratio = dn_label_noise_ratio self.dn_labelbook_size = dn_labelbook_size # prepare input projection layers if num_feature_levels > 1: num_backbone_outs = len(backbone.num_channels) input_proj_list = [] for _ in range(num_backbone_outs): in_channels = backbone.num_channels[_] input_proj_list.append(nn.Sequential( nn.Conv2d(in_channels, hidden_dim, kernel_size=1), nn.GroupNorm(32, hidden_dim), )) for _ in range(num_feature_levels - num_backbone_outs): input_proj_list.append(nn.Sequential( nn.Conv2d(in_channels, hidden_dim, kernel_size=3, stride=2, padding=1), nn.GroupNorm(32, hidden_dim), )) in_channels = hidden_dim self.input_proj = nn.ModuleList(input_proj_list) else: assert two_stage_type == 'no', "two_stage_type should be no if num_feature_levels=1 !!!" self.input_proj = nn.ModuleList([ nn.Sequential( nn.Conv2d(backbone.num_channels[-1], hidden_dim, kernel_size=1), nn.GroupNorm(32, hidden_dim), )]) self.backbone = backbone self.aux_loss = aux_loss self.box_pred_damping = box_pred_damping = None self.iter_update = iter_update assert iter_update, "Why not iter_update?" 
# prepare pred layers self.dec_pred_class_embed_share = dec_pred_class_embed_share self.dec_pred_bbox_embed_share = dec_pred_bbox_embed_share # prepare class & box embed _class_embed = nn.Linear(hidden_dim, num_classes) _bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3) # init the two embed layers prior_prob = 0.01 bias_value = -math.log((1 - prior_prob) / prior_prob) _class_embed.bias.data = torch.ones(self.num_classes) * bias_value nn.init.constant_(_bbox_embed.layers[-1].weight.data, 0) nn.init.constant_(_bbox_embed.layers[-1].bias.data, 0) if dec_pred_bbox_embed_share: box_embed_layerlist = [_bbox_embed for i in range(transformer.num_decoder_layers)] else: box_embed_layerlist = [copy.deepcopy(_bbox_embed) for i in range(transformer.num_decoder_layers)] if dec_pred_class_embed_share: class_embed_layerlist = [_class_embed for i in range(transformer.num_decoder_layers)] else: class_embed_layerlist = [copy.deepcopy(_class_embed) for i in range(transformer.num_decoder_layers)] self.bbox_embed = nn.ModuleList(box_embed_layerlist) self.class_embed = nn.ModuleList(class_embed_layerlist) self.transformer.decoder.bbox_embed = self.bbox_embed self.transformer.decoder.class_embed = self.class_embed # two stage self.two_stage_type = two_stage_type self.two_stage_add_query_num = two_stage_add_query_num assert two_stage_type in ['no', 'standard'], "unknown param {} of two_stage_type".format(two_stage_type) if two_stage_type != 'no': if two_stage_bbox_embed_share: assert dec_pred_class_embed_share and dec_pred_bbox_embed_share self.transformer.enc_out_bbox_embed = _bbox_embed else: self.transformer.enc_out_bbox_embed = copy.deepcopy(_bbox_embed) if two_stage_class_embed_share: assert dec_pred_class_embed_share and dec_pred_bbox_embed_share self.transformer.enc_out_class_embed = _class_embed else: self.transformer.enc_out_class_embed = copy.deepcopy(_class_embed) self.refpoint_embed = None if self.two_stage_add_query_num > 0: self.init_ref_points(two_stage_add_query_num) self.decoder_sa_type = decoder_sa_type assert decoder_sa_type in ['sa', 'ca_label', 'ca_content'] # self.replace_sa_with_double_ca = replace_sa_with_double_ca if decoder_sa_type == 'ca_label': self.label_embedding = nn.Embedding(num_classes, hidden_dim) for layer in self.transformer.decoder.layers: layer.label_embedding = self.label_embedding else: for layer in self.transformer.decoder.layers: layer.label_embedding = None self.label_embedding = None self._reset_parameters() def _reset_parameters(self): # init input_proj for proj in self.input_proj: nn.init.xavier_uniform_(proj[0].weight, gain=1) nn.init.constant_(proj[0].bias, 0) def init_ref_points(self, use_num_queries): raise NotImplementedError def forward(self, samples: NestedTensor, targets:List=None): """ The forward expects a NestedTensor, which consists of: - samples.tensor: batched images, of shape [batch_size x 3 x H x W] - samples.mask: a binary mask of shape [batch_size x H x W], containing 1 on padded pixels It returns a dict with the following elements: - "pred_logits": the classification logits (including no-object) for all queries. Shape= [batch_size x num_queries x num_classes] - "pred_boxes": The normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image (disregarding possible padding). See PostProcess for information on how to retrieve the unnormalized bounding box. - "aux_outputs": Optional, only returned when auxilary losses are activated. 
It is a list of dictionnaries containing the two above keys for each decoder layer. """ if isinstance(samples, (list, torch.Tensor)): samples = nested_tensor_from_tensor_list(samples) features, poss = self.backbone(samples) srcs = [] masks = [] for l, feat in enumerate(features): src, mask = feat.decompose() srcs.append(self.input_proj[l](src)) masks.append(mask) assert mask is not None if self.num_feature_levels > len(srcs): _len_srcs = len(srcs) for l in range(_len_srcs, self.num_feature_levels): if l == _len_srcs: src = self.input_proj[l](features[-1].tensors) else: src = self.input_proj[l](srcs[-1]) m = samples.mask mask = F.interpolate(m[None].float(), size=src.shape[-2:]).to(torch.bool)[0] pos_l = self.backbone[1](NestedTensor(src, mask)).to(src.dtype) srcs.append(src) masks.append(mask) poss.append(pos_l) if self.dn_number > 0 or targets is not None: input_query_label, input_query_bbox, attn_mask, dn_meta =\ prepare_for_cdn(dn_args=(targets, self.dn_number, self.dn_label_noise_ratio, self.dn_box_noise_scale), training=self.training,num_queries=self.num_queries,num_classes=self.num_classes, hidden_dim=self.hidden_dim,label_enc=self.label_enc) else: assert targets is None input_query_bbox = input_query_label = attn_mask = dn_meta = None hs, reference, hs_enc, ref_enc, init_box_proposal = self.transformer(srcs, masks, input_query_bbox, poss,input_query_label,attn_mask) # In case num object=0 hs[0]+=self.label_enc.weight[0,0]*0.0 outputs_coord_list = [] for dec_lid, (layer_ref_sig, layer_bbox_embed, layer_hs) in enumerate(zip(reference[:-1], self.bbox_embed, hs)): layer_delta_unsig = layer_bbox_embed(layer_hs) layer_outputs_unsig = layer_delta_unsig + inverse_sigmoid(layer_ref_sig) layer_outputs_unsig = scale_sigmoid(layer_outputs_unsig.sigmoid()) outputs_coord_list.append(layer_outputs_unsig) outputs_coord_list = torch.stack(outputs_coord_list) # outputs_class = self.class_embed(hs) outputs_class = torch.stack([layer_cls_embed(layer_hs) for layer_cls_embed, layer_hs in zip(self.class_embed, hs)]) if self.dn_number > 0 and dn_meta is not None: outputs_class, outputs_coord_list = \ dn_post_process(outputs_class, outputs_coord_list, dn_meta,self.aux_loss,self._set_aux_loss) out = {'pred_logits': outputs_class[-1], 'pred_boxes': outputs_coord_list[-1]} if self.aux_loss: out['aux_outputs'] = self._set_aux_loss(outputs_class, outputs_coord_list) # for encoder output if hs_enc is not None: # prepare intermediate outputs interm_coord = ref_enc[-1] interm_class = self.transformer.enc_out_class_embed(hs_enc[-1]) out['interm_outputs'] = {'pred_logits': interm_class, 'pred_boxes': interm_coord} out['interm_outputs_for_matching_pre'] = {'pred_logits': interm_class, 'pred_boxes': init_box_proposal} # prepare enc outputs # import ipdb; ipdb.set_trace() if hs_enc.shape[0] > 1: enc_outputs_coord = [] enc_outputs_class = [] for layer_id, (layer_box_embed, layer_class_embed, layer_hs_enc, layer_ref_enc) in enumerate(zip(self.enc_bbox_embed, self.enc_class_embed, hs_enc[:-1], ref_enc[:-1])): layer_enc_delta_unsig = layer_box_embed(layer_hs_enc) layer_enc_outputs_coord_unsig = layer_enc_delta_unsig + inverse_sigmoid(layer_ref_enc) layer_enc_outputs_coord = scale_sigmoid(layer_enc_outputs_coord_unsig.sigmoid()) layer_enc_outputs_class = layer_class_embed(layer_hs_enc) enc_outputs_coord.append(layer_enc_outputs_coord) enc_outputs_class.append(layer_enc_outputs_class) # enc_delta_unsig = self.enc_bbox_embed(hs_enc[:-1]) # enc_outputs_unsig = enc_delta_unsig + ref_enc[:-1] # enc_outputs_coord = 
enc_outputs_unsig.sigmoid() # enc_outputs_class = self.enc_class_embed(hs_enc[:-1]) out['enc_outputs'] = [ {'pred_logits': a, 'pred_boxes': b} for a, b in zip(enc_outputs_class, enc_outputs_coord) ] out['dn_meta'] = dn_meta return out @torch.jit.unused def _set_aux_loss(self, outputs_class, outputs_coord): # this is a workaround to make torchscript happy, as torchscript # doesn't support dictionary with non-homogeneous values, such # as a dict having both a Tensor and a list. return [{'pred_logits': a, 'pred_boxes': b} for a, b in zip(outputs_class[:-1], outputs_coord[:-1])] class SetCriterion(nn.Module): """ This class computes the loss for Conditional DETR. The process happens in two steps: 1) we compute hungarian assignment between ground truth boxes and the outputs of the model 2) we supervise each pair of matched ground-truth / prediction (supervise class and box) """ def __init__(self, num_classes, matcher, weight_dict, focal_alpha, losses): """ Create the criterion. Parameters: num_classes: number of object categories, omitting the special no-object category matcher: module able to compute a matching between targets and proposals weight_dict: dict containing as key the names of the losses and as values their relative weight. losses: list of all the losses to be applied. See get_loss for list of available losses. focal_alpha: alpha in Focal Loss """ super().__init__() self.num_classes = num_classes self.matcher = matcher self.weight_dict = weight_dict self.losses = losses self.focal_alpha = focal_alpha def loss_labels(self, outputs, targets, indices, num_boxes, log=True): """Classification loss (Binary focal loss) targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes] """ assert 'pred_logits' in outputs src_logits = outputs['pred_logits'] idx = self._get_src_permutation_idx(indices) target_classes_o = torch.cat([t["labels"][J] for t, (_, J) in zip(targets, indices)]) target_classes = torch.full(src_logits.shape[:2], self.num_classes, dtype=torch.int64, device=src_logits.device) target_classes[idx] = target_classes_o target_classes_onehot = torch.zeros([src_logits.shape[0], src_logits.shape[1], src_logits.shape[2]+1], dtype=src_logits.dtype, layout=src_logits.layout, device=src_logits.device) target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1) target_classes_onehot = target_classes_onehot[:,:,:-1] loss_ce = sigmoid_focal_loss(src_logits, target_classes_onehot, num_boxes, alpha=self.focal_alpha, gamma=2) * src_logits.shape[1] losses = {'loss_ce': loss_ce} if log: # TODO this should probably be a separate loss, not hacked in this one here losses['class_error'] = 100 - accuracy(src_logits[idx], target_classes_o)[0] return losses @torch.no_grad() def loss_cardinality(self, outputs, targets, indices, num_boxes): """ Compute the cardinality error, ie the absolute error in the number of predicted non-empty boxes This is not really a loss, it is intended for logging purposes only. 
It doesn't propagate gradients """ pred_logits = outputs['pred_logits'] device = pred_logits.device tgt_lengths = torch.as_tensor([len(v["labels"]) for v in targets], device=device) # Count the number of predictions that are NOT "no-object" (which is the last class) card_pred = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1) card_err = F.l1_loss(card_pred.float(), tgt_lengths.float()) losses = {'cardinality_error': card_err} return losses def loss_boxes(self, outputs, targets, indices, num_boxes): """Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4] The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size. """ assert 'pred_boxes' in outputs idx = self._get_src_permutation_idx(indices) src_boxes = outputs['pred_boxes'][idx] target_boxes = torch.cat([t['boxes'][i] for t, (_, i) in zip(targets, indices)], dim=0) loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction='none') losses = {} losses['loss_bbox'] = loss_bbox.sum() / num_boxes loss_giou = 1 - torch.diag(box_ops.generalized_box_iou( box_ops.box_cxcywh_to_xyxy(src_boxes), box_ops.box_cxcywh_to_xyxy(target_boxes))) losses['loss_giou'] = loss_giou.sum() / num_boxes # calculate the x,y and h,w loss with torch.no_grad(): losses['loss_xy'] = loss_bbox[..., :2].sum() / num_boxes losses['loss_hw'] = loss_bbox[..., 2:].sum() / num_boxes return losses def loss_masks(self, outputs, targets, indices, num_boxes): """Compute the losses related to the masks: the focal loss and the dice loss. targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w] """ assert "pred_masks" in outputs src_idx = self._get_src_permutation_idx(indices) tgt_idx = self._get_tgt_permutation_idx(indices) src_masks = outputs["pred_masks"] src_masks = src_masks[src_idx] masks = [t["masks"] for t in targets] # TODO use valid to mask invalid areas due to padding in loss target_masks, valid = nested_tensor_from_tensor_list(masks).decompose() target_masks = target_masks.to(src_masks) target_masks = target_masks[tgt_idx] # upsample predictions to the target size src_masks = interpolate(src_masks[:, None], size=target_masks.shape[-2:], mode="bilinear", align_corners=False) src_masks = src_masks[:, 0].flatten(1) target_masks = target_masks.flatten(1) target_masks = target_masks.view(src_masks.shape) losses = { "loss_mask": sigmoid_focal_loss(src_masks, target_masks, num_boxes), "loss_dice": dice_loss(src_masks, target_masks, num_boxes), } return losses def _get_src_permutation_idx(self, indices): # permute predictions following indices batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)]) src_idx = torch.cat([src for (src, _) in indices]) return batch_idx, src_idx def _get_tgt_permutation_idx(self, indices): # permute targets following indices batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)]) tgt_idx = torch.cat([tgt for (_, tgt) in indices]) return batch_idx, tgt_idx def get_loss(self, loss, outputs, targets, indices, num_boxes, **kwargs): loss_map = { 'labels': self.loss_labels, 'cardinality': self.loss_cardinality, 'boxes': self.loss_boxes, 'masks': self.loss_masks, # 'dn_labels': self.loss_dn_labels, # 'dn_boxes': self.loss_dn_boxes } assert loss in loss_map, f'do you really want to compute {loss} loss?' 
return loss_map[loss](outputs, targets, indices, num_boxes, **kwargs) def forward(self, outputs, targets, return_indices=False): """ This performs the loss computation. Parameters: outputs: dict of tensors, see the output specification of the model for the format targets: list of dicts, such that len(targets) == batch_size. The expected keys in each dict depends on the losses applied, see each loss' doc return_indices: used for vis. if True, the layer0-5 indices will be returned as well. """ outputs_without_aux = {k: v for k, v in outputs.items() if k != 'aux_outputs'} device=next(iter(outputs.values())).device indices = self.matcher(outputs_without_aux, targets) if return_indices: indices0_copy = indices indices_list = [] # Compute the average number of target boxes accross all nodes, for normalization purposes num_boxes = sum(len(t["labels"]) for t in targets) num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=device) if is_dist_avail_and_initialized(): torch.distributed.all_reduce(num_boxes) num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item() # Compute all the requested losses losses = {} # prepare for dn loss dn_meta = outputs['dn_meta'] if self.training and dn_meta and 'output_known_lbs_bboxes' in dn_meta: output_known_lbs_bboxes,single_pad, scalar = self.prep_for_dn(dn_meta) dn_pos_idx = [] dn_neg_idx = [] for i in range(len(targets)): if len(targets[i]['labels']) > 0: t = torch.range(0, len(targets[i]['labels']) - 1).long().cuda() t = t.unsqueeze(0).repeat(scalar, 1) tgt_idx = t.flatten() output_idx = (torch.tensor(range(scalar)) * single_pad).long().cuda().unsqueeze(1) + t output_idx = output_idx.flatten() else: output_idx = tgt_idx = torch.tensor([]).long().cuda() dn_pos_idx.append((output_idx, tgt_idx)) dn_neg_idx.append((output_idx + single_pad // 2, tgt_idx)) output_known_lbs_bboxes=dn_meta['output_known_lbs_bboxes'] l_dict = {} for loss in self.losses: kwargs = {} if 'labels' in loss: kwargs = {'log': False} l_dict.update(self.get_loss(loss, output_known_lbs_bboxes, targets, dn_pos_idx, num_boxes*scalar,**kwargs)) l_dict = {k + f'_dn': v for k, v in l_dict.items()} losses.update(l_dict) else: l_dict = dict() l_dict['loss_bbox_dn'] = torch.as_tensor(0.).to('cuda') l_dict['loss_giou_dn'] = torch.as_tensor(0.).to('cuda') l_dict['loss_ce_dn'] = torch.as_tensor(0.).to('cuda') l_dict['loss_xy_dn'] = torch.as_tensor(0.).to('cuda') l_dict['loss_hw_dn'] = torch.as_tensor(0.).to('cuda') l_dict['cardinality_error_dn'] = torch.as_tensor(0.).to('cuda') losses.update(l_dict) for loss in self.losses: losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes)) # In case of auxiliary losses, we repeat this process with the output of each intermediate layer. if 'aux_outputs' in outputs: for idx, aux_outputs in enumerate(outputs['aux_outputs']): indices = self.matcher(aux_outputs, targets) if return_indices: indices_list.append(indices) for loss in self.losses: if loss == 'masks': # Intermediate masks losses are too costly to compute, we ignore them. 
continue kwargs = {} if loss == 'labels': # Logging is enabled only for the last layer kwargs = {'log': False} l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_boxes, **kwargs) l_dict = {k + f'_{idx}': v for k, v in l_dict.items()} losses.update(l_dict) if self.training and dn_meta and 'output_known_lbs_bboxes' in dn_meta: aux_outputs_known = output_known_lbs_bboxes['aux_outputs'][idx] l_dict={} for loss in self.losses: kwargs = {} if 'labels' in loss: kwargs = {'log': False} l_dict.update(self.get_loss(loss, aux_outputs_known, targets, dn_pos_idx, num_boxes*scalar, **kwargs)) l_dict = {k + f'_dn_{idx}': v for k, v in l_dict.items()} losses.update(l_dict) else: l_dict = dict() l_dict['loss_bbox_dn']=torch.as_tensor(0.).to('cuda') l_dict['loss_giou_dn']=torch.as_tensor(0.).to('cuda') l_dict['loss_ce_dn']=torch.as_tensor(0.).to('cuda') l_dict['loss_xy_dn'] = torch.as_tensor(0.).to('cuda') l_dict['loss_hw_dn'] = torch.as_tensor(0.).to('cuda') l_dict['cardinality_error_dn'] = torch.as_tensor(0.).to('cuda') l_dict = {k + f'_{idx}': v for k, v in l_dict.items()} losses.update(l_dict) # interm_outputs loss if 'interm_outputs' in outputs: interm_outputs = outputs['interm_outputs'] indices = self.matcher(interm_outputs, targets) if return_indices: indices_list.append(indices) for loss in self.losses: if loss == 'masks': # Intermediate masks losses are too costly to compute, we ignore them. continue kwargs = {} if loss == 'labels': # Logging is enabled only for the last layer kwargs = {'log': False} l_dict = self.get_loss(loss, interm_outputs, targets, indices, num_boxes, **kwargs) l_dict = {k + f'_interm': v for k, v in l_dict.items()} losses.update(l_dict) # enc output loss if 'enc_outputs' in outputs: for i, enc_outputs in enumerate(outputs['enc_outputs']): indices = self.matcher(enc_outputs, targets) if return_indices: indices_list.append(indices) for loss in self.losses: if loss == 'masks': # Intermediate masks losses are too costly to compute, we ignore them. 
continue kwargs = {} if loss == 'labels': # Logging is enabled only for the last layer kwargs = {'log': False} l_dict = self.get_loss(loss, enc_outputs, targets, indices, num_boxes, **kwargs) l_dict = {k + f'_enc_{i}': v for k, v in l_dict.items()} losses.update(l_dict) if return_indices: indices_list.append(indices0_copy) return losses, indices_list return losses def prep_for_dn(self,dn_meta): output_known_lbs_bboxes = dn_meta['output_known_lbs_bboxes'] num_dn_groups,pad_size=dn_meta['num_dn_group'],dn_meta['pad_size'] assert pad_size % num_dn_groups==0 single_pad=pad_size//num_dn_groups return output_known_lbs_bboxes,single_pad,num_dn_groups class PostProcess(nn.Module): """ This module converts the model's output into the format expected by the coco api""" def __init__(self, num_select=100, nms_iou_threshold=-1) -> None: super().__init__() self.num_select = num_select self.nms_iou_threshold = nms_iou_threshold @torch.no_grad() def forward(self, outputs, target_sizes, not_to_xyxy=False, test=False): """ Perform the computation Parameters: outputs: raw outputs of the model target_sizes: tensor of dimension [batch_size x 2] containing the size of each images of the batch For evaluation, this must be the original image size (before any data augmentation) For visualization, this should be the image size after data augment, but before padding """ num_select = self.num_select out_logits, out_bbox = outputs['pred_logits'], outputs['pred_boxes'] assert len(out_logits) == len(target_sizes) assert target_sizes.shape[1] == 2 prob = out_logits.sigmoid() topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), num_select, dim=1) scores = topk_values topk_boxes = topk_indexes // out_logits.shape[2] labels = topk_indexes % out_logits.shape[2] if not_to_xyxy: boxes = out_bbox else: boxes = box_ops.box_cxcywh_to_xyxy(out_bbox) if test: assert not not_to_xyxy boxes[:,:,2:] = boxes[:,:,2:] - boxes[:,:,:2] boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1,1,4)) # and from relative [0, 1] to absolute [0, height] coordinates img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1) boxes = boxes * scale_fct[:, None, :] if self.nms_iou_threshold > 0: item_indices = [nms(b, s, iou_threshold=self.nms_iou_threshold) for b,s in zip(boxes, scores)] # import ipdb; ipdb.set_trace() results = [{'scores': s[i], 'labels': l[i], 'boxes': b[i]} for s, l, b, i in zip(scores, labels, boxes, item_indices)] else: results = [{'scores': s, 'labels': l, 'boxes': b} for s, l, b in zip(scores, labels, boxes)] return results @MODULE_BUILD_FUNCS.registe_with_name(module_name='dino') def build_dino(args): # the `num_classes` naming here is somewhat misleading. # it indeed corresponds to `max_obj_id + 1`, where max_obj_id # is the maximum id for a class in your dataset. For example, # COCO has a max_obj_id of 90, so we pass `num_classes` to be 91. # As another example, for a dataset that has a single class with id 1, # you should pass `num_classes` to be 2 (max_obj_id + 1). 
# For more details on this, check the following discussion # https://github.com/facebookresearch/detr/issues/108#issuecomment-650269223 # num_classes = 20 if args.dataset_file != 'coco' else 91 # if args.dataset_file == "coco_panoptic": # # for panoptic, we just add a num_classes that is large enough to hold # # max_obj_id + 1, but the exact value doesn't really matter # num_classes = 250 # if args.dataset_file == 'o365': # num_classes = 366 # if args.dataset_file == 'vanke': # num_classes = 51 num_classes = args.num_classes device = torch.device(args.device) backbone = build_backbone(args) transformer = build_deformable_transformer(args) try: match_unstable_error = args.match_unstable_error dn_labelbook_size = args.dn_labelbook_size except: match_unstable_error = True dn_labelbook_size = num_classes try: dec_pred_class_embed_share = args.dec_pred_class_embed_share except: dec_pred_class_embed_share = True try: dec_pred_bbox_embed_share = args.dec_pred_bbox_embed_share except: dec_pred_bbox_embed_share = True model = DINO( backbone, transformer, num_classes=num_classes, num_queries=args.num_queries, aux_loss=True, iter_update=True, query_dim=4, random_refpoints_xy=args.random_refpoints_xy, fix_refpoints_hw=args.fix_refpoints_hw, num_feature_levels=args.num_feature_levels, nheads=args.nheads, dec_pred_class_embed_share=dec_pred_class_embed_share, dec_pred_bbox_embed_share=dec_pred_bbox_embed_share, # two stage two_stage_type=args.two_stage_type, # box_share two_stage_bbox_embed_share=args.two_stage_bbox_embed_share, two_stage_class_embed_share=args.two_stage_class_embed_share, decoder_sa_type=args.decoder_sa_type, num_patterns=args.num_patterns, dn_number = args.dn_number if args.use_dn else 0, dn_box_noise_scale = args.dn_box_noise_scale, dn_label_noise_ratio = args.dn_label_noise_ratio, dn_labelbook_size = dn_labelbook_size, ) if args.masks: model = DETRsegm(model, freeze_detr=(args.frozen_weights is not None)) matcher = build_matcher(args) # prepare weight dict weight_dict = {'loss_ce': args.cls_loss_coef, 'loss_bbox': args.bbox_loss_coef} weight_dict['loss_giou'] = args.giou_loss_coef clean_weight_dict_wo_dn = copy.deepcopy(weight_dict) # for DN training if args.use_dn: weight_dict['loss_ce_dn'] = args.cls_loss_coef weight_dict['loss_bbox_dn'] = args.bbox_loss_coef weight_dict['loss_giou_dn'] = args.giou_loss_coef if args.masks: weight_dict["loss_mask"] = args.mask_loss_coef weight_dict["loss_dice"] = args.dice_loss_coef clean_weight_dict = copy.deepcopy(weight_dict) # TODO this is a hack if args.aux_loss: aux_weight_dict = {} for i in range(args.dec_layers - 1): aux_weight_dict.update({k + f'_{i}': v for k, v in clean_weight_dict.items()}) weight_dict.update(aux_weight_dict) if args.two_stage_type != 'no': interm_weight_dict = {} try: no_interm_box_loss = args.no_interm_box_loss except: no_interm_box_loss = False _coeff_weight_dict = { 'loss_ce': 1.0, 'loss_bbox': 1.0 if not no_interm_box_loss else 0.0, 'loss_giou': 1.0 if not no_interm_box_loss else 0.0, } try: interm_loss_coef = args.interm_loss_coef except: interm_loss_coef = 1.0 interm_weight_dict.update({k + f'_interm': v * interm_loss_coef * _coeff_weight_dict[k] for k, v in clean_weight_dict_wo_dn.items()}) weight_dict.update(interm_weight_dict) losses = ['labels', 'boxes', 'cardinality'] if args.masks: losses += ["masks"] criterion = SetCriterion(num_classes, matcher=matcher, weight_dict=weight_dict, focal_alpha=args.focal_alpha, losses=losses, ) criterion.to(device) postprocessors = {'bbox': 
PostProcess(num_select=args.num_select, nms_iou_threshold=args.nms_iou_threshold)} if args.masks:
postprocessors['segm'] = PostProcessSegm()
13
2023-10-16 02:18:33+00:00
16k
YuroFR/freqtrade-modded-crypto-trading-bot
tests/commands/test_commands.py
[ { "identifier": "start_convert_data", "path": "freqtrade/commands/data_commands.py", "snippet": "def start_convert_data(args: Dict[str, Any], ohlcv: bool = True) -> None:\n \"\"\"\n Convert data from one format to another\n \"\"\"\n config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)\n if ohlcv:\n migrate_binance_futures_data(config)\n convert_ohlcv_format(config,\n convert_from=args['format_from'],\n convert_to=args['format_to'],\n erase=args['erase'])\n else:\n convert_trades_format(config,\n convert_from=args['format_from_trades'], convert_to=args['format_to'],\n erase=args['erase'])" }, { "identifier": "start_convert_trades", "path": "freqtrade/commands/data_commands.py", "snippet": "def start_convert_trades(args: Dict[str, Any]) -> None:\n\n config = setup_utils_configuration(args, RunMode.UTIL_EXCHANGE)\n\n timerange = TimeRange()\n\n # Remove stake-currency to skip checks which are not relevant for datadownload\n config['stake_currency'] = ''\n\n if 'timeframes' not in config:\n config['timeframes'] = DL_DATA_TIMEFRAMES\n\n # Init exchange\n exchange = ExchangeResolver.load_exchange(config, validate=False)\n # Manual validations of relevant settings\n\n for timeframe in config['timeframes']:\n exchange.validate_timeframes(timeframe)\n\n # Convert downloaded trade data to different timeframes\n convert_trades_to_ohlcv(\n pairs=config.get('pairs', []), timeframes=config['timeframes'],\n datadir=config['datadir'], timerange=timerange, erase=bool(config.get('erase')),\n data_format_ohlcv=config['dataformat_ohlcv'],\n data_format_trades=config['dataformat_trades'],\n )" }, { "identifier": "start_download_data", "path": "freqtrade/commands/data_commands.py", "snippet": "def start_download_data(args: Dict[str, Any]) -> None:\n \"\"\"\n Download data (former download_backtest_data.py script)\n \"\"\"\n config = setup_utils_configuration(args, RunMode.UTIL_EXCHANGE)\n\n _check_data_config_download_sanity(config)\n\n try:\n download_data_main(config)\n\n except KeyboardInterrupt:\n sys.exit(\"SIGINT received, aborting ...\")" }, { "identifier": "start_list_data", "path": "freqtrade/commands/data_commands.py", "snippet": "def start_list_data(args: Dict[str, Any]) -> None:\n \"\"\"\n List available backtest data\n \"\"\"\n\n config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)\n\n from tabulate import tabulate\n\n from freqtrade.data.history.idatahandler import get_datahandler\n dhc = get_datahandler(config['datadir'], config['dataformat_ohlcv'])\n\n paircombs = dhc.ohlcv_get_available_data(\n config['datadir'],\n config.get('trading_mode', TradingMode.SPOT)\n )\n\n if args['pairs']:\n paircombs = [comb for comb in paircombs if comb[0] in args['pairs']]\n\n print(f\"Found {len(paircombs)} pair / timeframe combinations.\")\n if not config.get('show_timerange'):\n groupedpair = defaultdict(list)\n for pair, timeframe, candle_type in sorted(\n paircombs,\n key=lambda x: (x[0], timeframe_to_minutes(x[1]), x[2])\n ):\n groupedpair[(pair, candle_type)].append(timeframe)\n\n if groupedpair:\n print(tabulate([\n (pair, ', '.join(timeframes), candle_type)\n for (pair, candle_type), timeframes in groupedpair.items()\n ],\n headers=(\"Pair\", \"Timeframe\", \"Type\"),\n tablefmt='psql', stralign='right'))\n else:\n paircombs1 = [(\n pair, timeframe, candle_type,\n *dhc.ohlcv_data_min_max(pair, timeframe, candle_type)\n ) for pair, timeframe, candle_type in paircombs]\n\n print(tabulate([\n (pair, timeframe, candle_type,\n start.strftime(DATETIME_PRINT_FORMAT),\n 
end.strftime(DATETIME_PRINT_FORMAT))\n for pair, timeframe, candle_type, start, end in sorted(\n paircombs1,\n key=lambda x: (x[0], timeframe_to_minutes(x[1]), x[2]))\n ],\n headers=(\"Pair\", \"Timeframe\", \"Type\", 'From', 'To'),\n tablefmt='psql', stralign='right'))" }, { "identifier": "start_create_userdir", "path": "freqtrade/commands/deploy_commands.py", "snippet": "def start_create_userdir(args: Dict[str, Any]) -> None:\n \"\"\"\n Create \"user_data\" directory to contain user data strategies, hyperopt, ...)\n :param args: Cli args from Arguments()\n :return: None\n \"\"\"\n if \"user_data_dir\" in args and args[\"user_data_dir\"]:\n userdir = create_userdata_dir(args[\"user_data_dir\"], create_dir=True)\n copy_sample_files(userdir, overwrite=args[\"reset\"])\n else:\n logger.warning(\"`create-userdir` requires --userdir to be set.\")\n sys.exit(1)" }, { "identifier": "start_install_ui", "path": "freqtrade/commands/deploy_commands.py", "snippet": "def start_install_ui(args: Dict[str, Any]) -> None:\n\n dest_folder = Path(__file__).parents[1] / 'rpc/api_server/ui/installed/'\n # First make sure the assets are removed.\n dl_url, latest_version = get_ui_download_url(args.get('ui_version'))\n\n curr_version = read_ui_version(dest_folder)\n if curr_version == latest_version and not args.get('erase_ui_only'):\n logger.info(f\"UI already up-to-date, FreqUI Version {curr_version}.\")\n return\n\n clean_ui_subdir(dest_folder)\n if args.get('erase_ui_only'):\n logger.info(\"Erased UI directory content. Not downloading new version.\")\n else:\n # Download a new version\n download_and_install_ui(dest_folder, dl_url, latest_version)" }, { "identifier": "start_new_strategy", "path": "freqtrade/commands/deploy_commands.py", "snippet": "def start_new_strategy(args: Dict[str, Any]) -> None:\n\n config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)\n\n if \"strategy\" in args and args[\"strategy\"]:\n\n new_path = config['user_data_dir'] / USERPATH_STRATEGIES / (args['strategy'] + '.py')\n\n if new_path.exists():\n raise OperationalException(f\"`{new_path}` already exists. 
\"\n \"Please choose another Strategy Name.\")\n\n deploy_new_strategy(args['strategy'], new_path, args['template'])\n\n else:\n raise OperationalException(\"`new-strategy` requires --strategy to be set.\")" }, { "identifier": "start_hyperopt_list", "path": "freqtrade/commands/hyperopt_commands.py", "snippet": "def start_hyperopt_list(args: Dict[str, Any]) -> None:\n \"\"\"\n List hyperopt epochs previously evaluated\n \"\"\"\n from freqtrade.optimize.hyperopt_tools import HyperoptTools\n\n config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)\n\n print_colorized = config.get('print_colorized', False)\n print_json = config.get('print_json', False)\n export_csv = config.get('export_csv')\n no_details = config.get('hyperopt_list_no_details', False)\n no_header = False\n\n results_file = get_latest_hyperopt_file(\n config['user_data_dir'] / 'hyperopt_results',\n config.get('hyperoptexportfilename'))\n\n # Previous evaluations\n epochs, total_epochs = HyperoptTools.load_filtered_results(results_file, config)\n\n if print_colorized:\n colorama_init(autoreset=True)\n\n if not export_csv:\n try:\n print(HyperoptTools.get_result_table(config, epochs, total_epochs,\n not config.get('hyperopt_list_best', False),\n print_colorized, 0))\n except KeyboardInterrupt:\n print('User interrupted..')\n\n if epochs and not no_details:\n sorted_epochs = sorted(epochs, key=itemgetter('loss'))\n results = sorted_epochs[0]\n HyperoptTools.show_epoch_details(results, total_epochs, print_json, no_header)\n\n if epochs and export_csv:\n HyperoptTools.export_csv_file(\n config, epochs, export_csv\n )" }, { "identifier": "start_hyperopt_show", "path": "freqtrade/commands/hyperopt_commands.py", "snippet": "def start_hyperopt_show(args: Dict[str, Any]) -> None:\n \"\"\"\n Show details of a hyperopt epoch previously evaluated\n \"\"\"\n from freqtrade.optimize.hyperopt_tools import HyperoptTools\n\n config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)\n\n print_json = config.get('print_json', False)\n no_header = config.get('hyperopt_show_no_header', False)\n results_file = get_latest_hyperopt_file(\n config['user_data_dir'] / 'hyperopt_results',\n config.get('hyperoptexportfilename'))\n\n n = config.get('hyperopt_show_index', -1)\n\n # Previous evaluations\n epochs, total_epochs = HyperoptTools.load_filtered_results(results_file, config)\n\n filtered_epochs = len(epochs)\n\n if n > filtered_epochs:\n raise OperationalException(\n f\"The index of the epoch to show should be less than {filtered_epochs + 1}.\")\n if n < -filtered_epochs:\n raise OperationalException(\n f\"The index of the epoch to show should be greater than {-filtered_epochs - 1}.\")\n\n # Translate epoch index from human-readable format to pythonic\n if n > 0:\n n -= 1\n\n if epochs:\n val = epochs[n]\n\n metrics = val['results_metrics']\n if 'strategy_name' in metrics:\n strategy_name = metrics['strategy_name']\n show_backtest_result(strategy_name, metrics,\n metrics['stake_currency'], config.get('backtest_breakdown', []))\n\n HyperoptTools.try_export_params(config, strategy_name, val)\n\n HyperoptTools.show_epoch_details(val, total_epochs, print_json, no_header,\n header_str=\"Epoch details\")" }, { "identifier": "start_list_exchanges", "path": "freqtrade/commands/list_commands.py", "snippet": "def start_list_exchanges(args: Dict[str, Any]) -> None:\n \"\"\"\n Print available exchanges\n :param args: Cli args from Arguments()\n :return: None\n \"\"\"\n exchanges = list_available_exchanges(args['list_exchanges_all'])\n\n if 
args['print_one_column']:\n print('\\n'.join([e['name'] for e in exchanges]))\n else:\n headers = {\n 'name': 'Exchange name',\n 'supported': 'Supported',\n 'trade_modes': 'Markets',\n 'comment': 'Reason',\n }\n headers.update({'valid': 'Valid'} if args['list_exchanges_all'] else {})\n\n def build_entry(exchange: ValidExchangesType, valid: bool):\n valid_entry = {'valid': exchange['valid']} if valid else {}\n result: Dict[str, Union[str, bool]] = {\n 'name': exchange['name'],\n **valid_entry,\n 'supported': 'Official' if exchange['supported'] else '',\n 'trade_modes': ', '.join(\n (f\"{a['margin_mode']} \" if a['margin_mode'] else '') + a['trading_mode']\n for a in exchange['trade_modes']\n ),\n 'comment': exchange['comment'],\n }\n\n return result\n\n if args['list_exchanges_all']:\n print(\"All exchanges supported by the ccxt library:\")\n exchanges = [build_entry(e, True) for e in exchanges]\n else:\n print(\"Exchanges available for Freqtrade:\")\n exchanges = [build_entry(e, False) for e in exchanges if e['valid'] is not False]\n\n print(tabulate(exchanges, headers=headers, ))" }, { "identifier": "start_list_markets", "path": "freqtrade/commands/list_commands.py", "snippet": "def start_list_markets(args: Dict[str, Any], pairs_only: bool = False) -> None:\n \"\"\"\n Print pairs/markets on the exchange\n :param args: Cli args from Arguments()\n :param pairs_only: if True print only pairs, otherwise print all instruments (markets)\n :return: None\n \"\"\"\n config = setup_utils_configuration(args, RunMode.UTIL_EXCHANGE)\n\n # Init exchange\n exchange = ExchangeResolver.load_exchange(config, validate=False)\n\n # By default only active pairs/markets are to be shown\n active_only = not args.get('list_pairs_all', False)\n\n base_currencies = args.get('base_currencies', [])\n quote_currencies = args.get('quote_currencies', [])\n\n try:\n pairs = exchange.get_markets(base_currencies=base_currencies,\n quote_currencies=quote_currencies,\n tradable_only=pairs_only,\n active_only=active_only)\n # Sort the pairs/markets by symbol\n pairs = dict(sorted(pairs.items()))\n except Exception as e:\n raise OperationalException(f\"Cannot get markets. 
Reason: {e}\") from e\n\n else:\n summary_str = ((f\"Exchange {exchange.name} has {len(pairs)} \") +\n (\"active \" if active_only else \"\") +\n (plural(len(pairs), \"pair\" if pairs_only else \"market\")) +\n (f\" with {', '.join(base_currencies)} as base \"\n f\"{plural(len(base_currencies), 'currency', 'currencies')}\"\n if base_currencies else \"\") +\n (\" and\" if base_currencies and quote_currencies else \"\") +\n (f\" with {', '.join(quote_currencies)} as quote \"\n f\"{plural(len(quote_currencies), 'currency', 'currencies')}\"\n if quote_currencies else \"\"))\n\n headers = [\"Id\", \"Symbol\", \"Base\", \"Quote\", \"Active\",\n \"Spot\", \"Margin\", \"Future\", \"Leverage\"]\n\n tabular_data = [{\n 'Id': v['id'],\n 'Symbol': v['symbol'],\n 'Base': v['base'],\n 'Quote': v['quote'],\n 'Active': market_is_active(v),\n 'Spot': 'Spot' if exchange.market_is_spot(v) else '',\n 'Margin': 'Margin' if exchange.market_is_margin(v) else '',\n 'Future': 'Future' if exchange.market_is_future(v) else '',\n 'Leverage': exchange.get_max_leverage(v['symbol'], 20)\n } for _, v in pairs.items()]\n\n if (args.get('print_one_column', False) or\n args.get('list_pairs_print_json', False) or\n args.get('print_csv', False)):\n # Print summary string in the log in case of machine-readable\n # regular formats.\n logger.info(f\"{summary_str}.\")\n else:\n # Print empty string separating leading logs and output in case of\n # human-readable formats.\n print()\n\n if pairs:\n if args.get('print_list', False):\n # print data as a list, with human-readable summary\n print(f\"{summary_str}: {', '.join(pairs.keys())}.\")\n elif args.get('print_one_column', False):\n print('\\n'.join(pairs.keys()))\n elif args.get('list_pairs_print_json', False):\n print(rapidjson.dumps(list(pairs.keys()), default=str))\n elif args.get('print_csv', False):\n writer = csv.DictWriter(sys.stdout, fieldnames=headers)\n writer.writeheader()\n writer.writerows(tabular_data)\n else:\n # print data as a table, with the human-readable summary\n print(f\"{summary_str}:\")\n print(tabulate(tabular_data, headers='keys', tablefmt='psql', stralign='right'))\n elif not (args.get('print_one_column', False) or\n args.get('list_pairs_print_json', False) or\n args.get('print_csv', False)):\n print(f\"{summary_str}.\")" }, { "identifier": "start_list_strategies", "path": "freqtrade/commands/list_commands.py", "snippet": "def start_list_strategies(args: Dict[str, Any]) -> None:\n \"\"\"\n Print files with Strategy custom classes available in the directory\n \"\"\"\n config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)\n\n strategy_objs = StrategyResolver.search_all_objects(\n config, not args['print_one_column'], config.get('recursive_strategy_search', False))\n # Sort alphabetically\n strategy_objs = sorted(strategy_objs, key=lambda x: x['name'])\n for obj in strategy_objs:\n if obj['class']:\n obj['hyperoptable'] = obj['class'].detect_all_parameters()\n else:\n obj['hyperoptable'] = {'count': 0}\n\n if args['print_one_column']:\n print('\\n'.join([s['name'] for s in strategy_objs]))\n else:\n _print_objs_tabular(strategy_objs, config.get('print_colorized', False))" }, { "identifier": "start_list_timeframes", "path": "freqtrade/commands/list_commands.py", "snippet": "def start_list_timeframes(args: Dict[str, Any]) -> None:\n \"\"\"\n Print timeframes available on Exchange\n \"\"\"\n config = setup_utils_configuration(args, RunMode.UTIL_EXCHANGE)\n # Do not use timeframe set in the config\n config['timeframe'] = None\n\n # Init 
exchange\n exchange = ExchangeResolver.load_exchange(config, validate=False)\n\n if args['print_one_column']:\n print('\\n'.join(exchange.timeframes))\n else:\n print(f\"Timeframes available for the exchange `{exchange.name}`: \"\n f\"{', '.join(exchange.timeframes)}\")" }, { "identifier": "start_show_trades", "path": "freqtrade/commands/list_commands.py", "snippet": "def start_show_trades(args: Dict[str, Any]) -> None:\n \"\"\"\n Show trades\n \"\"\"\n import json\n\n from freqtrade.persistence import Trade, init_db\n config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)\n\n if 'db_url' not in config:\n raise OperationalException(\"--db-url is required for this command.\")\n\n logger.info(f'Using DB: \"{parse_db_uri_for_logging(config[\"db_url\"])}\"')\n init_db(config['db_url'])\n tfilter = []\n\n if config.get('trade_ids'):\n tfilter.append(Trade.id.in_(config['trade_ids']))\n\n trades = Trade.get_trades(tfilter).all()\n logger.info(f\"Printing {len(trades)} Trades: \")\n if config.get('print_json', False):\n print(json.dumps([trade.to_json() for trade in trades], indent=4))\n else:\n for trade in trades:\n print(trade)" }, { "identifier": "start_backtesting_show", "path": "freqtrade/commands/optimize_commands.py", "snippet": "def start_backtesting_show(args: Dict[str, Any]) -> None:\n \"\"\"\n Show previous backtest result\n \"\"\"\n\n config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)\n\n from freqtrade.data.btanalysis import load_backtest_stats\n from freqtrade.optimize.optimize_reports import show_backtest_results, show_sorted_pairlist\n\n results = load_backtest_stats(config['exportfilename'])\n\n show_backtest_results(config, results)\n show_sorted_pairlist(config, results)" }, { "identifier": "start_test_pairlist", "path": "freqtrade/commands/pairlist_commands.py", "snippet": "def start_test_pairlist(args: Dict[str, Any]) -> None:\n \"\"\"\n Test Pairlist configuration\n \"\"\"\n from freqtrade.plugins.pairlistmanager import PairListManager\n config = setup_utils_configuration(args, RunMode.UTIL_EXCHANGE)\n\n exchange = ExchangeResolver.load_exchange(config, validate=False)\n\n quote_currencies = args.get('quote_currencies')\n if not quote_currencies:\n quote_currencies = [config.get('stake_currency')]\n results = {}\n for curr in quote_currencies:\n config['stake_currency'] = curr\n pairlists = PairListManager(exchange, config)\n pairlists.refresh_pairlist()\n results[curr] = pairlists.whitelist\n\n for curr, pairlist in results.items():\n if not args.get('print_one_column', False) and not args.get('list_pairs_print_json', False):\n print(f\"Pairs for {curr}: \")\n\n if args.get('print_one_column', False):\n print('\\n'.join(pairlist))\n elif args.get('list_pairs_print_json', False):\n print(rapidjson.dumps(list(pairlist), default=str))\n else:\n print(pairlist)" }, { "identifier": "start_strategy_update", "path": "freqtrade/commands/strategy_utils_commands.py", "snippet": "def start_strategy_update(args: Dict[str, Any]) -> None:\n \"\"\"\n Start the strategy updating script\n :param args: Cli args from Arguments()\n :return: None\n \"\"\"\n\n if sys.version_info == (3, 8): # pragma: no cover\n sys.exit(\"Freqtrade strategy updater requires Python version >= 3.9\")\n\n config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)\n\n strategy_objs = StrategyResolver.search_all_objects(\n config, enum_failed=False, recursive=config.get('recursive_strategy_search', False))\n\n filtered_strategy_objs = []\n if args['strategy_list']:\n 
filtered_strategy_objs = [\n strategy_obj for strategy_obj in strategy_objs\n if strategy_obj['name'] in args['strategy_list']\n ]\n\n else:\n # Use all available entries.\n filtered_strategy_objs = strategy_objs\n\n processed_locations = set()\n for strategy_obj in filtered_strategy_objs:\n if strategy_obj['location'] not in processed_locations:\n processed_locations.add(strategy_obj['location'])\n start_conversion(strategy_obj, config)" }, { "identifier": "start_trading", "path": "freqtrade/commands/trade_commands.py", "snippet": "def start_trading(args: Dict[str, Any]) -> int:\n \"\"\"\n Main entry point for trading mode\n \"\"\"\n # Import here to avoid loading worker module when it's not used\n from freqtrade.worker import Worker\n\n def term_handler(signum, frame):\n # Raise KeyboardInterrupt - so we can handle it in the same way as Ctrl-C\n raise KeyboardInterrupt()\n\n # Create and run worker\n worker = None\n try:\n signal.signal(signal.SIGTERM, term_handler)\n worker = Worker(args)\n worker.run()\n except Exception as e:\n logger.error(str(e))\n logger.exception(\"Fatal exception!\")\n except (KeyboardInterrupt):\n logger.info('SIGINT received, aborting ...')\n finally:\n if worker:\n logger.info(\"worker found ... calling exit\")\n worker.exit()\n return 0" }, { "identifier": "start_webserver", "path": "freqtrade/commands/webserver_commands.py", "snippet": "def start_webserver(args: Dict[str, Any]) -> None:\n \"\"\"\n Main entry point for webserver mode\n \"\"\"\n from freqtrade.configuration import setup_utils_configuration\n from freqtrade.rpc.api_server import ApiServer\n\n # Initialize configuration\n\n config = setup_utils_configuration(args, RunMode.WEBSERVER)\n ApiServer(config, standalone=True)" }, { "identifier": "start_convert_db", "path": "freqtrade/commands/db_commands.py", "snippet": "def start_convert_db(args: Dict[str, Any]) -> None:\n from sqlalchemy.orm import make_transient\n\n from freqtrade.persistence import Order, Trade, init_db\n from freqtrade.persistence.migrations import set_sequence_ids\n from freqtrade.persistence.pairlock import PairLock\n\n config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)\n\n init_db(config['db_url'])\n session_target = Trade.session\n init_db(config['db_url_from'])\n logger.info(\"Starting db migration.\")\n\n trade_count = 0\n pairlock_count = 0\n for trade in Trade.get_trades():\n trade_count += 1\n make_transient(trade)\n for o in trade.orders:\n make_transient(o)\n\n session_target.add(trade)\n\n session_target.commit()\n\n for pairlock in PairLock.get_all_locks():\n pairlock_count += 1\n make_transient(pairlock)\n session_target.add(pairlock)\n session_target.commit()\n\n # Update sequences\n max_trade_id = session_target.scalar(select(func.max(Trade.id)))\n max_order_id = session_target.scalar(select(func.max(Order.id)))\n max_pairlock_id = session_target.scalar(select(func.max(PairLock.id)))\n\n set_sequence_ids(session_target.get_bind(),\n trade_id=max_trade_id,\n order_id=max_order_id,\n pairlock_id=max_pairlock_id)\n\n logger.info(f\"Migrated {trade_count} Trades, and {pairlock_count} Pairlocks.\")" }, { "identifier": "clean_ui_subdir", "path": "freqtrade/commands/deploy_commands.py", "snippet": "def clean_ui_subdir(directory: Path):\n if directory.is_dir():\n logger.info(\"Removing UI directory content.\")\n\n for p in reversed(list(directory.glob('**/*'))): # iterate contents from leaves to root\n if p.name in ('.gitkeep', 'fallback_file.html'):\n continue\n if p.is_file():\n p.unlink()\n elif 
p.is_dir():\n p.rmdir()" }, { "identifier": "download_and_install_ui", "path": "freqtrade/commands/deploy_commands.py", "snippet": "def download_and_install_ui(dest_folder: Path, dl_url: str, version: str):\n from io import BytesIO\n from zipfile import ZipFile\n\n logger.info(f\"Downloading {dl_url}\")\n resp = requests.get(dl_url).content\n dest_folder.mkdir(parents=True, exist_ok=True)\n with ZipFile(BytesIO(resp)) as zf:\n for fn in zf.filelist:\n with zf.open(fn) as x:\n destfile = dest_folder / fn.filename\n if fn.is_dir():\n destfile.mkdir(exist_ok=True)\n else:\n destfile.write_bytes(x.read())\n with (dest_folder / '.uiversion').open('w') as f:\n f.write(version)" }, { "identifier": "get_ui_download_url", "path": "freqtrade/commands/deploy_commands.py", "snippet": "def get_ui_download_url(version: Optional[str] = None) -> Tuple[str, str]:\n base_url = 'https://api.github.com/repos/freqtrade/frequi/'\n # Get base UI Repo path\n\n resp = requests.get(f\"{base_url}releases\")\n resp.raise_for_status()\n r = resp.json()\n\n if version:\n tmp = [x for x in r if x['name'] == version]\n if tmp:\n latest_version = tmp[0]['name']\n assets = tmp[0].get('assets', [])\n else:\n raise ValueError(\"UI-Version not found.\")\n else:\n latest_version = r[0]['name']\n assets = r[0].get('assets', [])\n dl_url = ''\n if assets and len(assets) > 0:\n dl_url = assets[0]['browser_download_url']\n\n # URL not found - try assets url\n if not dl_url:\n assets = r[0]['assets_url']\n resp = requests.get(assets)\n r = resp.json()\n dl_url = r[0]['browser_download_url']\n\n return dl_url, latest_version" }, { "identifier": "read_ui_version", "path": "freqtrade/commands/deploy_commands.py", "snippet": "def read_ui_version(dest_folder: Path) -> Optional[str]:\n file = dest_folder / '.uiversion'\n if not file.is_file():\n return None\n\n with file.open('r') as f:\n return f.read()" }, { "identifier": "start_list_freqAI_models", "path": "freqtrade/commands/list_commands.py", "snippet": "def start_list_freqAI_models(args: Dict[str, Any]) -> None:\n \"\"\"\n Print files with FreqAI models custom classes available in the directory\n \"\"\"\n config = setup_utils_configuration(args, RunMode.UTIL_NO_EXCHANGE)\n from freqtrade.resolvers.freqaimodel_resolver import FreqaiModelResolver\n model_objs = FreqaiModelResolver.search_all_objects(config, not args['print_one_column'])\n # Sort alphabetically\n model_objs = sorted(model_objs, key=lambda x: x['name'])\n if args['print_one_column']:\n print('\\n'.join([s['name'] for s in model_objs]))\n else:\n _print_objs_tabular(model_objs, config.get('print_colorized', False))" }, { "identifier": "setup_utils_configuration", "path": "freqtrade/configuration/config_setup.py", "snippet": "def setup_utils_configuration(args: Dict[str, Any], method: RunMode) -> Dict[str, Any]:\n \"\"\"\n Prepare the configuration for utils subcommands\n :param args: Cli args from Arguments()\n :param method: Bot running mode\n :return: Configuration\n \"\"\"\n configuration = Configuration(args, method)\n config = configuration.get_config()\n\n # Ensure these modes are using Dry-run\n config['dry_run'] = True\n validate_config_consistency(config, preliminary=True)\n\n return config" }, { "identifier": "RunMode", "path": "freqtrade/enums/runmode.py", "snippet": "class RunMode(Enum):\n \"\"\"\n Bot running mode (backtest, hyperopt, ...)\n can be \"live\", \"dry-run\", \"backtest\", \"edge\", \"hyperopt\".\n \"\"\"\n LIVE = \"live\"\n DRY_RUN = \"dry_run\"\n BACKTEST = \"backtest\"\n EDGE = \"edge\"\n 
HYPEROPT = \"hyperopt\"\n UTIL_EXCHANGE = \"util_exchange\"\n UTIL_NO_EXCHANGE = \"util_no_exchange\"\n PLOT = \"plot\"\n WEBSERVER = \"webserver\"\n OTHER = \"other\"" }, { "identifier": "OperationalException", "path": "freqtrade/exceptions.py", "snippet": "class OperationalException(FreqtradeException):\n \"\"\"\n Requires manual intervention and will stop the bot.\n Most of the time, this is caused by an invalid Configuration.\n \"\"\"" }, { "identifier": "init_db", "path": "freqtrade/persistence/models.py", "snippet": "def init_db(db_url: str) -> None:\n \"\"\"\n Initializes this module with the given config,\n registers all known command handlers\n and starts polling for message updates\n :param db_url: Database to use\n :return: None\n \"\"\"\n kwargs: Dict[str, Any] = {}\n\n if db_url == 'sqlite:///':\n raise OperationalException(\n f'Bad db-url {db_url}. For in-memory database, please use `sqlite://`.')\n if db_url == 'sqlite://':\n kwargs.update({\n 'poolclass': StaticPool,\n })\n # Take care of thread ownership\n if db_url.startswith('sqlite://'):\n kwargs.update({\n 'connect_args': {'check_same_thread': False},\n })\n\n try:\n engine = create_engine(db_url, future=True, **kwargs)\n except NoSuchModuleError:\n raise OperationalException(f\"Given value for db_url: '{db_url}' \"\n f\"is no valid database URL! (See {_SQL_DOCS_URL})\")\n\n # https://docs.sqlalchemy.org/en/13/orm/contextual.html#thread-local-scope\n # Scoped sessions proxy requests to the appropriate thread-local session.\n # Since we also use fastAPI, we need to make it aware of the request id, too\n Trade.session = scoped_session(sessionmaker(\n bind=engine, autoflush=False), scopefunc=get_request_or_thread_id)\n Order.session = Trade.session\n PairLock.session = Trade.session\n _KeyValueStoreModel.session = Trade.session\n\n previous_tables = inspect(engine).get_table_names()\n ModelBase.metadata.create_all(engine)\n check_migrate(engine, decl_base=ModelBase, previous_tables=previous_tables)" }, { "identifier": "PairLocks", "path": "freqtrade/persistence/pairlock_middleware.py", "snippet": "class PairLocks:\n \"\"\"\n Pairlocks middleware class\n Abstracts the database layer away so it becomes optional - which will be necessary to support\n backtesting and hyperopt in the future.\n \"\"\"\n\n use_db = True\n locks: List[PairLock] = []\n\n timeframe: str = ''\n\n @staticmethod\n def reset_locks() -> None:\n \"\"\"\n Resets all locks. Only active for backtesting mode.\n \"\"\"\n if not PairLocks.use_db:\n PairLocks.locks = []\n\n @staticmethod\n def lock_pair(pair: str, until: datetime, reason: Optional[str] = None, *,\n now: Optional[datetime] = None, side: str = '*') -> PairLock:\n \"\"\"\n Create PairLock from now to \"until\".\n Uses database by default, unless PairLocks.use_db is set to False,\n in which case a list is maintained.\n :param pair: pair to lock. use '*' to lock all pairs\n :param until: End time of the lock. Will be rounded up to the next candle.\n :param reason: Reason string that will be shown as reason for the lock\n :param now: Current timestamp. 
Used to determine lock start time.\n :param side: Side to lock pair, can be 'long', 'short' or '*'\n \"\"\"\n lock = PairLock(\n pair=pair,\n lock_time=now or datetime.now(timezone.utc),\n lock_end_time=timeframe_to_next_date(PairLocks.timeframe, until),\n reason=reason,\n side=side,\n active=True\n )\n if PairLocks.use_db:\n PairLock.session.add(lock)\n PairLock.session.commit()\n else:\n PairLocks.locks.append(lock)\n return lock\n\n @staticmethod\n def get_pair_locks(pair: Optional[str], now: Optional[datetime] = None,\n side: str = '*') -> Sequence[PairLock]:\n \"\"\"\n Get all currently active locks for this pair\n :param pair: Pair to check for. Returns all current locks if pair is empty\n :param now: Datetime object (generated via datetime.now(timezone.utc)).\n defaults to datetime.now(timezone.utc)\n \"\"\"\n if not now:\n now = datetime.now(timezone.utc)\n\n if PairLocks.use_db:\n return PairLock.query_pair_locks(pair, now, side).all()\n else:\n locks = [lock for lock in PairLocks.locks if (\n lock.lock_end_time >= now\n and lock.active is True\n and (pair is None or lock.pair == pair)\n and (lock.side == '*' or lock.side == side)\n )]\n return locks\n\n @staticmethod\n def get_pair_longest_lock(\n pair: str, now: Optional[datetime] = None, side: str = '*') -> Optional[PairLock]:\n \"\"\"\n Get the lock that expires the latest for the pair given.\n \"\"\"\n locks = PairLocks.get_pair_locks(pair, now, side=side)\n locks = sorted(locks, key=lambda lock: lock.lock_end_time, reverse=True)\n return locks[0] if locks else None\n\n @staticmethod\n def unlock_pair(pair: str, now: Optional[datetime] = None, side: str = '*') -> None:\n \"\"\"\n Release all locks for this pair.\n :param pair: Pair to unlock\n :param now: Datetime object (generated via datetime.now(timezone.utc)).\n defaults to datetime.now(timezone.utc)\n \"\"\"\n if not now:\n now = datetime.now(timezone.utc)\n\n logger.info(f\"Releasing all locks for {pair}.\")\n locks = PairLocks.get_pair_locks(pair, now, side=side)\n for lock in locks:\n lock.active = False\n if PairLocks.use_db:\n PairLock.session.commit()\n\n @staticmethod\n def unlock_reason(reason: str, now: Optional[datetime] = None) -> None:\n \"\"\"\n Release all locks for this reason.\n :param reason: Which reason to unlock\n :param now: Datetime object (generated via datetime.now(timezone.utc)).\n defaults to datetime.now(timezone.utc)\n \"\"\"\n if not now:\n now = datetime.now(timezone.utc)\n\n if PairLocks.use_db:\n # used in live modes\n logger.info(f\"Releasing all locks with reason '{reason}':\")\n filters = [PairLock.lock_end_time > now,\n PairLock.active.is_(True),\n PairLock.reason == reason\n ]\n locks = PairLock.session.scalars(select(PairLock).filter(*filters)).all()\n for lock in locks:\n logger.info(f\"Releasing lock for {lock.pair} with reason '{reason}'.\")\n lock.active = False\n PairLock.session.commit()\n else:\n # used in backtesting mode; don't show log messages for speed\n locksb = PairLocks.get_pair_locks(None)\n for lock in locksb:\n if lock.reason == reason:\n lock.active = False\n\n @staticmethod\n def is_global_lock(now: Optional[datetime] = None, side: str = '*') -> bool:\n \"\"\"\n :param now: Datetime object (generated via datetime.now(timezone.utc)).\n defaults to datetime.now(timezone.utc)\n \"\"\"\n if not now:\n now = datetime.now(timezone.utc)\n\n return len(PairLocks.get_pair_locks('*', now, side)) > 0\n\n @staticmethod\n def is_pair_locked(pair: str, now: Optional[datetime] = None, side: str = '*') -> bool:\n \"\"\"\n :param 
pair: Pair to check for\n :param now: Datetime object (generated via datetime.now(timezone.utc)).\n defaults to datetime.now(timezone.utc)\n \"\"\"\n if not now:\n now = datetime.now(timezone.utc)\n\n return (\n len(PairLocks.get_pair_locks(pair, now, side)) > 0\n or PairLocks.is_global_lock(now, side)\n )\n\n @staticmethod\n def get_all_locks() -> Sequence[PairLock]:\n \"\"\"\n Return all locks, also locks with expired end date\n \"\"\"\n if PairLocks.use_db:\n return PairLock.get_all_locks().all()\n else:\n return PairLocks.locks" }, { "identifier": "dt_floor_day", "path": "freqtrade/util/datetime_helpers.py", "snippet": "def dt_floor_day(dt: datetime) -> datetime:\n \"\"\"Return the floor of the day for the given datetime.\"\"\"\n return dt.replace(hour=0, minute=0, second=0, microsecond=0)" }, { "identifier": "dt_now", "path": "freqtrade/util/datetime_helpers.py", "snippet": "def dt_now() -> datetime:\n \"\"\"Return the current datetime in UTC.\"\"\"\n return datetime.now(timezone.utc)" }, { "identifier": "dt_utc", "path": "freqtrade/util/datetime_helpers.py", "snippet": "def dt_utc(year: int, month: int, day: int, hour: int = 0, minute: int = 0, second: int = 0,\n microsecond: int = 0) -> datetime:\n \"\"\"Return a datetime in UTC.\"\"\"\n return datetime(year, month, day, hour, minute, second, microsecond, tzinfo=timezone.utc)" }, { "identifier": "CURRENT_TEST_STRATEGY", "path": "tests/conftest.py", "snippet": "CURRENT_TEST_STRATEGY = 'StrategyTestV3'" }, { "identifier": "EXMS", "path": "tests/conftest.py", "snippet": "EXMS = 'freqtrade.exchange.exchange.Exchange'" }, { "identifier": "create_mock_trades", "path": "tests/conftest.py", "snippet": "def create_mock_trades(fee, is_short: Optional[bool] = False, use_db: bool = True):\n \"\"\"\n Create some fake trades ...\n :param is_short: Optional bool, None creates a mix of long and short trades.\n \"\"\"\n def add_trade(trade):\n if use_db:\n Trade.session.add(trade)\n else:\n LocalTrade.add_bt_trade(trade)\n is_short1 = is_short if is_short is not None else True\n is_short2 = is_short if is_short is not None else False\n # Simulate dry_run entries\n trade = mock_trade_1(fee, is_short1)\n add_trade(trade)\n\n trade = mock_trade_2(fee, is_short1)\n add_trade(trade)\n\n trade = mock_trade_3(fee, is_short2)\n add_trade(trade)\n\n trade = mock_trade_4(fee, is_short2)\n add_trade(trade)\n\n trade = mock_trade_5(fee, is_short2)\n add_trade(trade)\n\n trade = mock_trade_6(fee, is_short1)\n add_trade(trade)\n\n if use_db:\n Trade.commit()" }, { "identifier": "get_args", "path": "tests/conftest.py", "snippet": "def get_args(args):\n return Arguments(args).get_parsed_arg()" }, { "identifier": "log_has", "path": "tests/conftest.py", "snippet": "def log_has(line, logs):\n \"\"\"Check if line is found on some caplog's message.\"\"\"\n return any(line == message for message in logs.messages)" }, { "identifier": "log_has_re", "path": "tests/conftest.py", "snippet": "def log_has_re(line, logs):\n \"\"\"Check if line matches some caplog's message.\"\"\"\n return any(re.match(line, message) for message in logs.messages)" }, { "identifier": "patch_exchange", "path": "tests/conftest.py", "snippet": "def patch_exchange(\n mocker,\n api_mock=None,\n id='binance',\n mock_markets=True,\n mock_supported_modes=True\n) -> None:\n mocker.patch(f'{EXMS}._load_async_markets', return_value={})\n mocker.patch(f'{EXMS}.validate_config', MagicMock())\n mocker.patch(f'{EXMS}.validate_timeframes', MagicMock())\n mocker.patch(f'{EXMS}.id', PropertyMock(return_value=id))\n 
mocker.patch(f'{EXMS}.name', PropertyMock(return_value=id.title()))\n mocker.patch(f'{EXMS}.precisionMode', PropertyMock(return_value=2))\n\n if mock_markets:\n if isinstance(mock_markets, bool):\n mock_markets = get_markets()\n mocker.patch(f'{EXMS}.markets', PropertyMock(return_value=mock_markets))\n\n if mock_supported_modes:\n mocker.patch(\n f'freqtrade.exchange.{id}.{id.capitalize()}._supported_trading_mode_margin_pairs',\n PropertyMock(return_value=[\n (TradingMode.MARGIN, MarginMode.CROSS),\n (TradingMode.MARGIN, MarginMode.ISOLATED),\n (TradingMode.FUTURES, MarginMode.CROSS),\n (TradingMode.FUTURES, MarginMode.ISOLATED)\n ])\n )\n\n if api_mock:\n mocker.patch(f'{EXMS}._init_ccxt', return_value=api_mock)\n else:\n mocker.patch(f'{EXMS}._init_ccxt', MagicMock())\n mocker.patch(f'{EXMS}.timeframes', PropertyMock(\n return_value=['5m', '15m', '1h', '1d']))" }, { "identifier": "patched_configuration_load_config_file", "path": "tests/conftest.py", "snippet": "def patched_configuration_load_config_file(mocker, config) -> None:\n mocker.patch(\n 'freqtrade.configuration.load_config.load_config_file',\n lambda *args, **kwargs: config\n )" }, { "identifier": "MOCK_TRADE_COUNT", "path": "tests/conftest_trades.py", "snippet": "MOCK_TRADE_COUNT = 6" } ]
import json import re import pytest from datetime import datetime, timedelta from io import BytesIO from pathlib import Path from unittest.mock import MagicMock, PropertyMock from zipfile import ZipFile from freqtrade.commands import (start_backtesting_show, start_convert_data, start_convert_trades, start_create_userdir, start_download_data, start_hyperopt_list, start_hyperopt_show, start_install_ui, start_list_data, start_list_exchanges, start_list_markets, start_list_strategies, start_list_timeframes, start_new_strategy, start_show_trades, start_strategy_update, start_test_pairlist, start_trading, start_webserver) from freqtrade.commands.db_commands import start_convert_db from freqtrade.commands.deploy_commands import (clean_ui_subdir, download_and_install_ui, get_ui_download_url, read_ui_version) from freqtrade.commands.list_commands import start_list_freqAI_models from freqtrade.configuration import setup_utils_configuration from freqtrade.enums import RunMode from freqtrade.exceptions import OperationalException from freqtrade.persistence.models import init_db from freqtrade.persistence.pairlock_middleware import PairLocks from freqtrade.util import dt_floor_day, dt_now, dt_utc from tests.conftest import (CURRENT_TEST_STRATEGY, EXMS, create_mock_trades, get_args, log_has, log_has_re, patch_exchange, patched_configuration_load_config_file) from tests.conftest_trades import MOCK_TRADE_COUNT
13096
captured = capsys.readouterr() assert ("Exchange Bittrex has 1 active market with LTC as base currency and " "with USDT, NONEXISTENT as quote currencies: XLTCUSDT.\n" in captured.out) # active markets, base=LTC, quote=NONEXISTENT args = [ "list-markets", '--config', 'config_examples/config_bittrex.example.json', "--base", "LTC", "--quote", "NONEXISTENT", "--print-list", ] start_list_markets(get_args(args), False) captured = capsys.readouterr() assert ("Exchange Bittrex has 0 active markets with LTC as base currency and " "with NONEXISTENT as quote currency.\n" in captured.out) # Test tabular output args = [ "list-markets", '--config', 'config_examples/config_bittrex.example.json', ] start_list_markets(get_args(args), False) captured = capsys.readouterr() assert ("Exchange Bittrex has 12 active markets:\n" in captured.out) # Test tabular output, no markets found args = [ "list-markets", '--config', 'config_examples/config_bittrex.example.json', "--base", "LTC", "--quote", "NONEXISTENT", ] start_list_markets(get_args(args), False) captured = capsys.readouterr() assert ("Exchange Bittrex has 0 active markets with LTC as base currency and " "with NONEXISTENT as quote currency.\n" in captured.out) # Test --print-json args = [ "list-markets", '--config', 'config_examples/config_bittrex.example.json', "--print-json" ] start_list_markets(get_args(args), False) captured = capsys.readouterr() assert ('["ADA/USDT:USDT","BLK/BTC","ETH/BTC","ETH/USDT","ETH/USDT:USDT",' '"LTC/BTC","LTC/ETH","LTC/USD","NEO/BTC","TKN/BTC","XLTCUSDT","XRP/BTC"]' in captured.out) # Test --print-csv args = [ "list-markets", '--config', 'config_examples/config_bittrex.example.json', "--print-csv" ] start_list_markets(get_args(args), False) captured = capsys.readouterr() assert ("Id,Symbol,Base,Quote,Active,Spot,Margin,Future,Leverage" in captured.out) assert ("blkbtc,BLK/BTC,BLK,BTC,True,Spot" in captured.out) assert ("USD-LTC,LTC/USD,LTC,USD,True,Spot" in captured.out) # Test --one-column args = [ "list-markets", '--config', 'config_examples/config_bittrex.example.json', "--one-column" ] start_list_markets(get_args(args), False) captured = capsys.readouterr() assert re.search(r"^BLK/BTC$", captured.out, re.MULTILINE) assert re.search(r"^LTC/USD$", captured.out, re.MULTILINE) mocker.patch(f'{EXMS}.markets', PropertyMock(side_effect=ValueError)) # Test --one-column args = [ "list-markets", '--config', 'config_examples/config_bittrex.example.json', "--one-column" ] with pytest.raises(OperationalException, match=r"Cannot get markets.*"): start_list_markets(get_args(args), False) def test_create_datadir_failed(caplog): args = [ "create-userdir", ] with pytest.raises(SystemExit): start_create_userdir(get_args(args)) assert log_has("`create-userdir` requires --userdir to be set.", caplog) def test_create_datadir(caplog, mocker): cud = mocker.patch("freqtrade.commands.deploy_commands.create_userdata_dir", MagicMock()) csf = mocker.patch("freqtrade.commands.deploy_commands.copy_sample_files", MagicMock()) args = [ "create-userdir", "--userdir", "/temp/freqtrade/test" ] start_create_userdir(get_args(args)) assert cud.call_count == 1 assert csf.call_count == 1 def test_start_new_strategy(mocker, caplog): wt_mock = mocker.patch.object(Path, "write_text", MagicMock()) mocker.patch.object(Path, "exists", MagicMock(return_value=False)) args = [ "new-strategy", "--strategy", "CoolNewStrategy" ]
def test_setup_utils_configuration(): args = [ 'list-exchanges', '--config', 'config_examples/config_bittrex.example.json', ] config = setup_utils_configuration(get_args(args), RunMode.OTHER) assert "exchange" in config assert config['dry_run'] is True def test_start_trading_fail(mocker, caplog): mocker.patch("freqtrade.worker.Worker.run", MagicMock(side_effect=OperationalException)) mocker.patch("freqtrade.worker.Worker.__init__", MagicMock(return_value=None)) exitmock = mocker.patch("freqtrade.worker.Worker.exit", MagicMock()) args = [ 'trade', '-c', 'config_examples/config_bittrex.example.json' ] start_trading(get_args(args)) assert exitmock.call_count == 1 exitmock.reset_mock() caplog.clear() mocker.patch("freqtrade.worker.Worker.__init__", MagicMock(side_effect=OperationalException)) start_trading(get_args(args)) assert exitmock.call_count == 0 assert log_has('Fatal exception!', caplog) def test_start_webserver(mocker, caplog): api_server_mock = mocker.patch("freqtrade.rpc.api_server.ApiServer", ) args = [ 'webserver', '-c', 'config_examples/config_bittrex.example.json' ] start_webserver(get_args(args)) assert api_server_mock.call_count == 1 def test_list_exchanges(capsys): args = [ "list-exchanges", ] start_list_exchanges(get_args(args)) captured = capsys.readouterr() assert re.match(r"Exchanges available for Freqtrade.*", captured.out) assert re.search(r".*binance.*", captured.out) assert re.search(r".*bittrex.*", captured.out) # Test with --one-column args = [ "list-exchanges", "--one-column", ] start_list_exchanges(get_args(args)) captured = capsys.readouterr() assert re.search(r"^binance$", captured.out, re.MULTILINE) assert re.search(r"^bittrex$", captured.out, re.MULTILINE) # Test with --all args = [ "list-exchanges", "--all", ] start_list_exchanges(get_args(args)) captured = capsys.readouterr() assert re.match(r"All exchanges supported by the ccxt library.*", captured.out) assert re.search(r".*binance.*", captured.out) assert re.search(r".*bittrex.*", captured.out) assert re.search(r".*bitmex.*", captured.out) # Test with --one-column --all args = [ "list-exchanges", "--one-column", "--all", ] start_list_exchanges(get_args(args)) captured = capsys.readouterr() assert re.search(r"^binance$", captured.out, re.MULTILINE) assert re.search(r"^bittrex$", captured.out, re.MULTILINE) assert re.search(r"^bitmex$", captured.out, re.MULTILINE) def test_list_timeframes(mocker, capsys): api_mock = MagicMock() api_mock.timeframes = {'1m': 'oneMin', '5m': 'fiveMin', '30m': 'thirtyMin', '1h': 'hour', '1d': 'day', } patch_exchange(mocker, api_mock=api_mock, id='bittrex') args = [ "list-timeframes", ] pargs = get_args(args) pargs['config'] = None with pytest.raises(OperationalException, match=r"This command requires a configured exchange.*"): start_list_timeframes(pargs) # Test with --config config_examples/config_bittrex.example.json args = [ "list-timeframes", '--config', 'config_examples/config_bittrex.example.json', ] start_list_timeframes(get_args(args)) captured = capsys.readouterr() assert re.match("Timeframes available for the exchange `Bittrex`: " "1m, 5m, 30m, 1h, 1d", captured.out) # Test with --exchange bittrex args = [ "list-timeframes", "--exchange", "bittrex", ] start_list_timeframes(get_args(args)) captured = capsys.readouterr() assert re.match("Timeframes available for the exchange `Bittrex`: " "1m, 5m, 30m, 1h, 1d", captured.out) api_mock.timeframes = {'1m': '1m', '5m': '5m', '15m': '15m', '30m': '30m', '1h': '1h', '6h': '6h', '12h': '12h', '1d': '1d', '3d': '3d', } 
patch_exchange(mocker, api_mock=api_mock, id='binance') # Test with --exchange binance args = [ "list-timeframes", "--exchange", "binance", ] start_list_timeframes(get_args(args)) captured = capsys.readouterr() assert re.match("Timeframes available for the exchange `Binance`: " "1m, 5m, 15m, 30m, 1h, 6h, 12h, 1d, 3d", captured.out) # Test with --one-column args = [ "list-timeframes", '--config', 'config_examples/config_bittrex.example.json', "--one-column", ] start_list_timeframes(get_args(args)) captured = capsys.readouterr() assert re.search(r"^1m$", captured.out, re.MULTILINE) assert re.search(r"^5m$", captured.out, re.MULTILINE) assert re.search(r"^1h$", captured.out, re.MULTILINE) assert re.search(r"^1d$", captured.out, re.MULTILINE) # Test with --exchange binance --one-column args = [ "list-timeframes", "--exchange", "binance", "--one-column", ] start_list_timeframes(get_args(args)) captured = capsys.readouterr() assert re.search(r"^1m$", captured.out, re.MULTILINE) assert re.search(r"^5m$", captured.out, re.MULTILINE) assert re.search(r"^1h$", captured.out, re.MULTILINE) assert re.search(r"^1d$", captured.out, re.MULTILINE) def test_list_markets(mocker, markets_static, capsys): api_mock = MagicMock() patch_exchange(mocker, api_mock=api_mock, id='bittrex', mock_markets=markets_static) # Test with no --config args = [ "list-markets", ] pargs = get_args(args) pargs['config'] = None with pytest.raises(OperationalException, match=r"This command requires a configured exchange.*"): start_list_markets(pargs, False) # Test with --config config_examples/config_bittrex.example.json args = [ "list-markets", '--config', 'config_examples/config_bittrex.example.json', "--print-list", ] start_list_markets(get_args(args), False) captured = capsys.readouterr() assert ("Exchange Bittrex has 12 active markets: " "ADA/USDT:USDT, BLK/BTC, ETH/BTC, ETH/USDT, ETH/USDT:USDT, LTC/BTC, " "LTC/ETH, LTC/USD, NEO/BTC, TKN/BTC, XLTCUSDT, XRP/BTC.\n" in captured.out) patch_exchange(mocker, api_mock=api_mock, id="binance", mock_markets=markets_static) # Test with --exchange args = [ "list-markets", "--exchange", "binance" ] pargs = get_args(args) pargs['config'] = None start_list_markets(pargs, False) captured = capsys.readouterr() assert re.match("\nExchange Binance has 12 active markets:\n", captured.out) patch_exchange(mocker, api_mock=api_mock, id="bittrex", mock_markets=markets_static) # Test with --all: all markets args = [ "list-markets", "--all", '--config', 'config_examples/config_bittrex.example.json', "--print-list", ] start_list_markets(get_args(args), False) captured = capsys.readouterr() assert ("Exchange Bittrex has 14 markets: " "ADA/USDT:USDT, BLK/BTC, BTT/BTC, ETH/BTC, ETH/USDT, ETH/USDT:USDT, " "LTC/BTC, LTC/ETH, LTC/USD, LTC/USDT, NEO/BTC, TKN/BTC, XLTCUSDT, XRP/BTC.\n" in captured.out) # Test list-pairs subcommand: active pairs args = [ "list-pairs", '--config', 'config_examples/config_bittrex.example.json', "--print-list", ] start_list_markets(get_args(args), True) captured = capsys.readouterr() assert ("Exchange Bittrex has 9 active pairs: " "BLK/BTC, ETH/BTC, ETH/USDT, LTC/BTC, LTC/ETH, LTC/USD, NEO/BTC, TKN/BTC, XRP/BTC.\n" in captured.out) # Test list-pairs subcommand with --all: all pairs args = [ "list-pairs", "--all", '--config', 'config_examples/config_bittrex.example.json', "--print-list", ] start_list_markets(get_args(args), True) captured = capsys.readouterr() assert ("Exchange Bittrex has 11 pairs: " "BLK/BTC, BTT/BTC, ETH/BTC, ETH/USDT, LTC/BTC, LTC/ETH, LTC/USD, LTC/USDT, 
NEO/BTC, " "TKN/BTC, XRP/BTC.\n" in captured.out) # active markets, base=ETH, LTC args = [ "list-markets", '--config', 'config_examples/config_bittrex.example.json', "--base", "ETH", "LTC", "--print-list", ] start_list_markets(get_args(args), False) captured = capsys.readouterr() assert ("Exchange Bittrex has 7 active markets with ETH, LTC as base currencies: " "ETH/BTC, ETH/USDT, ETH/USDT:USDT, LTC/BTC, LTC/ETH, LTC/USD, XLTCUSDT.\n" in captured.out) # active markets, base=LTC args = [ "list-markets", '--config', 'config_examples/config_bittrex.example.json', "--base", "LTC", "--print-list", ] start_list_markets(get_args(args), False) captured = capsys.readouterr() assert ("Exchange Bittrex has 4 active markets with LTC as base currency: " "LTC/BTC, LTC/ETH, LTC/USD, XLTCUSDT.\n" in captured.out) # active markets, quote=USDT, USD args = [ "list-markets", '--config', 'config_examples/config_bittrex.example.json', "--quote", "USDT", "USD", "--print-list", ] start_list_markets(get_args(args), False) captured = capsys.readouterr() assert ("Exchange Bittrex has 5 active markets with USDT, USD as quote currencies: " "ADA/USDT:USDT, ETH/USDT, ETH/USDT:USDT, LTC/USD, XLTCUSDT.\n" in captured.out) # active markets, quote=USDT args = [ "list-markets", '--config', 'config_examples/config_bittrex.example.json', "--quote", "USDT", "--print-list", ] start_list_markets(get_args(args), False) captured = capsys.readouterr() assert ("Exchange Bittrex has 4 active markets with USDT as quote currency: " "ADA/USDT:USDT, ETH/USDT, ETH/USDT:USDT, XLTCUSDT.\n" in captured.out) # active markets, base=LTC, quote=USDT args = [ "list-markets", '--config', 'config_examples/config_bittrex.example.json', "--base", "LTC", "--quote", "USDT", "--print-list", ] start_list_markets(get_args(args), False) captured = capsys.readouterr() assert ("Exchange Bittrex has 1 active market with LTC as base currency and " "with USDT as quote currency: XLTCUSDT.\n" in captured.out) # active pairs, base=LTC, quote=USDT args = [ "list-pairs", '--config', 'config_examples/config_bittrex.example.json', "--base", "LTC", "--quote", "USD", "--print-list", ] start_list_markets(get_args(args), True) captured = capsys.readouterr() assert ("Exchange Bittrex has 1 active pair with LTC as base currency and " "with USD as quote currency: LTC/USD.\n" in captured.out) # active markets, base=LTC, quote=USDT, NONEXISTENT args = [ "list-markets", '--config', 'config_examples/config_bittrex.example.json', "--base", "LTC", "--quote", "USDT", "NONEXISTENT", "--print-list", ] start_list_markets(get_args(args), False) captured = capsys.readouterr() assert ("Exchange Bittrex has 1 active market with LTC as base currency and " "with USDT, NONEXISTENT as quote currencies: XLTCUSDT.\n" in captured.out) # active markets, base=LTC, quote=NONEXISTENT args = [ "list-markets", '--config', 'config_examples/config_bittrex.example.json', "--base", "LTC", "--quote", "NONEXISTENT", "--print-list", ] start_list_markets(get_args(args), False) captured = capsys.readouterr() assert ("Exchange Bittrex has 0 active markets with LTC as base currency and " "with NONEXISTENT as quote currency.\n" in captured.out) # Test tabular output args = [ "list-markets", '--config', 'config_examples/config_bittrex.example.json', ] start_list_markets(get_args(args), False) captured = capsys.readouterr() assert ("Exchange Bittrex has 12 active markets:\n" in captured.out) # Test tabular output, no markets found args = [ "list-markets", '--config', 'config_examples/config_bittrex.example.json', 
"--base", "LTC", "--quote", "NONEXISTENT", ] start_list_markets(get_args(args), False) captured = capsys.readouterr() assert ("Exchange Bittrex has 0 active markets with LTC as base currency and " "with NONEXISTENT as quote currency.\n" in captured.out) # Test --print-json args = [ "list-markets", '--config', 'config_examples/config_bittrex.example.json', "--print-json" ] start_list_markets(get_args(args), False) captured = capsys.readouterr() assert ('["ADA/USDT:USDT","BLK/BTC","ETH/BTC","ETH/USDT","ETH/USDT:USDT",' '"LTC/BTC","LTC/ETH","LTC/USD","NEO/BTC","TKN/BTC","XLTCUSDT","XRP/BTC"]' in captured.out) # Test --print-csv args = [ "list-markets", '--config', 'config_examples/config_bittrex.example.json', "--print-csv" ] start_list_markets(get_args(args), False) captured = capsys.readouterr() assert ("Id,Symbol,Base,Quote,Active,Spot,Margin,Future,Leverage" in captured.out) assert ("blkbtc,BLK/BTC,BLK,BTC,True,Spot" in captured.out) assert ("USD-LTC,LTC/USD,LTC,USD,True,Spot" in captured.out) # Test --one-column args = [ "list-markets", '--config', 'config_examples/config_bittrex.example.json', "--one-column" ] start_list_markets(get_args(args), False) captured = capsys.readouterr() assert re.search(r"^BLK/BTC$", captured.out, re.MULTILINE) assert re.search(r"^LTC/USD$", captured.out, re.MULTILINE) mocker.patch(f'{EXMS}.markets', PropertyMock(side_effect=ValueError)) # Test --one-column args = [ "list-markets", '--config', 'config_examples/config_bittrex.example.json', "--one-column" ] with pytest.raises(OperationalException, match=r"Cannot get markets.*"): start_list_markets(get_args(args), False) def test_create_datadir_failed(caplog): args = [ "create-userdir", ] with pytest.raises(SystemExit): start_create_userdir(get_args(args)) assert log_has("`create-userdir` requires --userdir to be set.", caplog) def test_create_datadir(caplog, mocker): cud = mocker.patch("freqtrade.commands.deploy_commands.create_userdata_dir", MagicMock()) csf = mocker.patch("freqtrade.commands.deploy_commands.copy_sample_files", MagicMock()) args = [ "create-userdir", "--userdir", "/temp/freqtrade/test" ] start_create_userdir(get_args(args)) assert cud.call_count == 1 assert csf.call_count == 1 def test_start_new_strategy(mocker, caplog): wt_mock = mocker.patch.object(Path, "write_text", MagicMock()) mocker.patch.object(Path, "exists", MagicMock(return_value=False)) args = [ "new-strategy", "--strategy", "CoolNewStrategy" ]
start_new_strategy(get_args(args))
6
2023-10-21 10:02:05+00:00
16k
generative-skill-chaining/gsc-code
generative_skill_chaining/envs/pybullet/table/predicates.py
[ { "identifier": "primitive_actions", "path": "generative_skill_chaining/envs/pybullet/table/primitive_actions.py", "snippet": "class PrimitiveAction:\nclass PickAction(PrimitiveAction):\nclass PlaceAction(PrimitiveAction):\nclass PullAction(PrimitiveAction):\nclass PushAction(PrimitiveAction):\n RANGES: Dict[str, Tuple[float, float]]\n RANGES = {\n \"x\": (-0.2, 0.2),\n \"y\": (-0.1, 0.1),\n \"z\": (-0.05, 0.05),\n \"theta\": (-0.25 * np.pi, 0.75 * np.pi),\n }\n RANGES = {\n \"x\": (-1.0, 1.0),\n \"y\": (-1.0, 1.0),\n \"z\": (0.0, 0.1),\n \"theta\": (-0.25 * np.pi, 0.75 * np.pi),\n }\n RANGES = {\n \"r_reach\": (-0.2, 0.0),\n \"r_pull\": (-0.4, -0.1),\n \"y\": (-0.05, 0.05),\n \"theta\": (-0.5 * np.pi, 0.5 * np.pi),\n }\n RANGES = {\n \"r_reach\": (-0.4, -0.2),\n \"r_push\": (0.1, 0.4),\n \"y\": (-0.05, 0.05),\n \"theta\": (-0.5 * np.pi, 0.5 * np.pi),\n }\n def __init__(self, vector: Optional[np.ndarray] = None):\n def range(cls) -> np.ndarray:\n def random(cls):\n def __init__(\n self,\n vector: Optional[np.ndarray] = None,\n pos: Optional[np.ndarray] = None,\n theta: Optional[float] = None,\n ):\n def pos(self) -> np.ndarray:\n def pos(self, pos: np.ndarray) -> None:\n def theta(self) -> np.ndarray:\n def theta(self, theta: np.ndarray) -> None:\n def __repr__(self) -> str:\n def __init__(\n self,\n vector: Optional[np.ndarray] = None,\n pos: Optional[np.ndarray] = None,\n theta: Optional[float] = None,\n ):\n def pos(self) -> np.ndarray:\n def pos(self, pos: np.ndarray) -> None:\n def theta(self) -> np.ndarray:\n def theta(self, theta: np.ndarray) -> None:\n def __repr__(self) -> str:\n def __init__(\n self,\n vector: Optional[np.ndarray] = None,\n r_reach: Optional[float] = None,\n r_pull: Optional[float] = None,\n y: Optional[float] = None,\n theta: Optional[float] = None,\n ):\n def r_reach(self) -> np.ndarray:\n def r_reach(self, r_reach: np.ndarray) -> None:\n def r_pull(self) -> np.ndarray:\n def r_pull(self, r_pull: np.ndarray) -> None:\n def y(self) -> np.ndarray:\n def y(self, y: np.ndarray) -> None:\n def theta(self) -> np.ndarray:\n def theta(self, theta: np.ndarray) -> None:\n def __repr__(self) -> str:\n def __init__(\n self,\n vector: Optional[np.ndarray] = None,\n r_reach: Optional[float] = None,\n r_push: Optional[float] = None,\n y: Optional[float] = None,\n theta: Optional[float] = None,\n ):\n def r_reach(self) -> np.ndarray:\n def r_reach(self, r_reach: np.ndarray) -> None:\n def r_push(self) -> np.ndarray:\n def r_push(self, r_push: np.ndarray) -> None:\n def y(self) -> np.ndarray:\n def y(self, y: np.ndarray) -> None:\n def theta(self) -> np.ndarray:\n def theta(self, theta: np.ndarray) -> None:\n def __repr__(self) -> str:" }, { "identifier": "utils", "path": "generative_skill_chaining/envs/pybullet/table/utils.py", "snippet": "TABLE_CONSTRAINTS = {\n \"table_z_max\": 0.00,\n \"table_x_min\": 0.28,\n \"table_y_min\": -0.45,\n \"table_y_max\": 0.45,\n \"workspace_x_min\": 0.40,\n \"operational_x_min\": 0.50,\n \"operational_x_max\": 0.60,\n \"obstruction_x_min\": 0.575,\n \"workspace_radius\": 0.7,\n}\nEPSILONS = {\"aabb\": 0.01, \"align\": 0.99, \"twist\": 0.001, \"tipping\": 0.1}\nTWIST_HISTORY: Dict[str, Dict[Object, np.ndarray]] = collections.defaultdict(dict)\ndef compute_margins(obj: Object) -> np.ndarray:\ndef compute_object_pose(obj: Object, theta: float) -> math.Pose:\ndef is_above(obj_a: Object, obj_b: Object) -> bool:\ndef is_upright(obj: Object) -> bool:\ndef is_within_distance(\n obj_a: Object, obj_b: Object, distance: float, physics_id: int\n) -> 
bool:\ndef is_moving(obj: Object, use_history: Optional[str] = None) -> bool:\ndef is_below_table(obj: Object) -> bool:\ndef is_touching(\n body_a: body.Body,\n body_b: body.Body,\n link_id_a: Optional[int] = None,\n link_id_b: Optional[int] = None,\n) -> bool:\ndef is_intersecting(obj_a: Object, obj_b: Object) -> bool:\ndef is_under(obj_a: Object, obj_b: Object) -> bool:\ndef is_inworkspace(\n obj: Optional[Object] = None,\n obj_pos: Optional[np.ndarray] = None,\n distance: Optional[float] = None,\n) -> bool:\ndef is_beyondworkspace(\n obj: Optional[Object] = None,\n obj_pos: Optional[np.ndarray] = None,\n distance: Optional[float] = None,\n) -> bool:\ndef load_config(config: Union[str, Any]) -> Any:" }, { "identifier": "Box", "path": "generative_skill_chaining/envs/pybullet/table/objects.py", "snippet": "class Box(Object):\n def __init__(\n self,\n physics_id: int,\n name: str,\n size: Union[List[float], np.ndarray],\n color: Union[List[float], np.ndarray],\n mass: float = 0.1,\n ):\n box = shapes.Box(size=np.array(size), mass=mass, color=np.array(color))\n body_id = shapes.create_body(box, physics_id=physics_id)\n self._shape = box\n\n super().__init__(\n physics_id=physics_id, body_id=body_id, name=name, is_static=mass == 0.0\n )\n\n self._state.box_size = box.size\n self._bbox = np.array([-0.5 * self.size, 0.5 * self.size])\n\n @property\n def size(self) -> np.ndarray:\n return self._state.box_size\n\n @property\n def bbox(self) -> np.ndarray:\n return self._bbox\n\n @property\n def shapes(self) -> Sequence[shapes.Shape]:\n return [self._shape]" }, { "identifier": "Hook", "path": "generative_skill_chaining/envs/pybullet/table/objects.py", "snippet": "class Hook(Object):\n @staticmethod\n def compute_link_positions(\n head_length: float, handle_length: float, handle_y: float, radius: float\n ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:\n dy = (\n 0.5\n * np.sign(handle_y)\n * max(0, (abs(handle_y) - 1.0) * head_length / 2 + radius)\n )\n pos_handle = np.array([-radius / 2, handle_y * head_length / 2 - dy, 0.0])\n pos_head = np.array([(handle_length - radius) / 2, -dy, 0.0])\n pos_joint = np.array(\n [(handle_length - radius) / 2, handle_y * head_length / 2 - dy, 0.0]\n )\n\n return pos_handle, pos_head, pos_joint\n\n def __init__(\n self,\n physics_id: int,\n name: str,\n head_length: float,\n handle_length: float,\n handle_y: float,\n color: Union[List[float], np.ndarray],\n radius: float = 0.02,\n mass: float = 0.1,\n ):\n if not isinstance(color, np.ndarray):\n color = np.array(color)\n\n pos_handle, pos_head, pos_joint = Hook.compute_link_positions(\n head_length=head_length,\n handle_length=handle_length,\n handle_y=handle_y,\n radius=radius,\n )\n handle = shapes.Cylinder(\n radius=radius,\n length=handle_length,\n mass=(handle_length / (head_length + handle_length + radius)) * mass,\n color=color,\n pose=math.Pose(\n pos=pos_handle,\n quat=eigen.Quaterniond(\n eigen.AngleAxisd(angle=np.pi / 2, axis=np.array([0.0, 1.0, 0.0]))\n ).coeffs,\n ),\n )\n head = shapes.Cylinder(\n radius=radius,\n length=head_length,\n mass=(head_length / (head_length + handle_length + radius)) * mass,\n color=color,\n pose=math.Pose(\n pos=pos_head,\n quat=eigen.Quaterniond(\n eigen.AngleAxisd(angle=np.pi / 2, axis=np.array([1.0, 0.0, 0.0]))\n ).coeffs,\n ),\n )\n joint = shapes.Sphere(\n radius=radius,\n mass=(radius / (head_length + handle_length + radius)) * mass,\n color=color,\n pose=math.Pose(pos=pos_joint),\n )\n self._shapes = [joint, handle, head]\n body_id = shapes.create_body(\n 
self.shapes, link_parents=[0, 0], physics_id=physics_id\n )\n\n super().__init__(\n physics_id=physics_id, body_id=body_id, name=name, is_static=mass == 0.0\n )\n\n self._state.head_length = head_length\n self._state.handle_length = handle_length\n self._state.handle_y = handle_y\n self._radius = radius\n\n self._size = np.array(\n [handle_length + radius, head_length + 2 * abs(pos_head[1]), 2 * radius]\n )\n self._bbox = np.array([-0.5 * self.size, 0.5 * self.size])\n\n @property\n def head_length(self) -> float:\n return self._state.head_length # type: ignore\n\n @property\n def handle_length(self) -> float:\n return self._state.handle_length # type: ignore\n\n @property\n def handle_y(self) -> float:\n return self._state.handle_y # type: ignore\n\n @property\n def radius(self) -> float:\n return self._radius\n\n @property\n def size(self) -> np.ndarray:\n return self._size\n\n @property\n def bbox(self) -> np.ndarray:\n return self._bbox\n\n def convex_hulls(\n self, world_frame: bool = True, project_2d: bool = False\n ) -> List[np.ndarray]:\n \"\"\"Computes the convex hulls of the handle and head links.\"\"\"\n handle_pose = self.shapes[1].pose\n head_pose = self.shapes[2].pose\n assert handle_pose is not None and head_pose is not None\n\n positions = np.array(\n [\n [0.0, handle_pose.pos[1], 0.0],\n [head_pose.pos[0], 0.0, 0.0],\n ]\n )\n sizes = np.array(\n [\n [self.size[0], 2 * self.radius, 2 * self.radius],\n [2 * self.radius, self.size[1], 2 * self.radius],\n ]\n )\n bboxes = np.array([positions - 0.5 * sizes, positions + 0.5 * sizes]).swapaxes(\n 0, 1\n )\n\n pose = self.pose() if world_frame else None\n vertices = [compute_bbox_vertices(bbox, pose, project_2d) for bbox in bboxes]\n\n return vertices\n\n @property\n def shapes(self) -> Sequence[shapes.Shape]:\n return self._shapes\n\n # def aabb(self) -> np.ndarray:\n # raise NotImplementedError" }, { "identifier": "Null", "path": "generative_skill_chaining/envs/pybullet/table/objects.py", "snippet": "class Null(Object):\n def __init__(self, physics_id: int, name: str):\n sphere = shapes.Sphere(radius=0.001)\n body_id = shapes.create_body(sphere, physics_id=physics_id)\n\n super().__init__(\n physics_id=physics_id, body_id=body_id, name=name, is_static=True\n )\n\n def state(self) -> object_state.ObjectState:\n # Null object state is a zero vector.\n return self._state\n\n def enable_collisions(self) -> None:\n pass\n\n def unfreeze(self) -> bool:\n return False" }, { "identifier": "Object", "path": "generative_skill_chaining/envs/pybullet/table/objects.py", "snippet": "class Object(body.Body):\n name: str\n is_static: bool = False\n\n def __init__(\n self, physics_id: int, body_id: int, name: str, is_static: bool = False\n ):\n super().__init__(physics_id, body_id)\n\n self.name = name\n self.is_static = is_static\n\n T_pybullet_to_obj = super().pose().to_eigen()\n self._modified_axes = not T_pybullet_to_obj.is_approx(\n eigen.Isometry3d.identity()\n )\n if self._modified_axes:\n self._T_pybullet_to_obj = T_pybullet_to_obj\n self._T_obj_to_pybullet = T_pybullet_to_obj.inverse()\n\n self._state = object_state.ObjectState()\n\n def pose(self) -> math.Pose:\n if not self._modified_axes:\n return super().pose()\n\n return math.Pose.from_eigen(super().pose().to_eigen() * self._T_obj_to_pybullet)\n\n def set_pose(self, pose: math.Pose) -> None:\n if not self._modified_axes:\n return super().set_pose(pose)\n\n return super().set_pose(\n math.Pose.from_eigen(pose.to_eigen() * self._T_pybullet_to_obj)\n )\n\n def 
disable_collisions(self) -> None:\n for link_id in range(self.dof):\n p.setCollisionFilterGroupMask(\n self.body_id, link_id, 0, 0, physicsClientId=self.physics_id\n )\n\n def enable_collisions(self) -> None:\n for link_id in range(self.dof):\n p.setCollisionFilterGroupMask(\n self.body_id, link_id, 1, 0xFF, physicsClientId=self.physics_id\n )\n\n @property\n def inertia(self) -> dyn.SpatialInertiad:\n try:\n return self._obj_inertia # type: ignore\n except AttributeError:\n pass\n\n self._obj_inertia = super().inertia\n if self._modified_axes:\n self._obj_inertia = self._obj_inertia * self._T_pybullet_to_obj\n\n T_world_to_obj = self.pose().to_eigen().inverse()\n for link_id in range(self.dof):\n link = body.Link(self.physics_id, self.body_id, link_id)\n T_link_to_obj = T_world_to_obj * link.pose().to_eigen()\n self._obj_inertia += link.inertia * T_link_to_obj\n\n return self._obj_inertia\n\n def state(self) -> object_state.ObjectState:\n pose = self.pose()\n aa = eigen.AngleAxisd(eigen.Quaterniond(pose.quat))\n self._state.pos = pose.pos\n self._state.aa = aa.angle * aa.axis\n\n return self._state\n\n def set_state(self, state: object_state.ObjectState) -> None:\n self.set_pose(state.pose())\n\n def reset(self, action_skeleton: List) -> None:\n pass\n\n @classmethod\n def create(\n cls,\n physics_id: int,\n object_type: Optional[str],\n object_kwargs: Dict[str, Any] = {},\n object_groups: Dict[str, \"ObjectGroup\"] = {},\n **kwargs,\n ) -> \"Object\":\n object_class = Null if object_type is None else globals()[object_type]\n if issubclass(object_class, Variant):\n kwargs[\"object_groups\"] = object_groups\n object_kwargs = object_kwargs.copy()\n object_kwargs.update(kwargs)\n return object_class(physics_id=physics_id, **object_kwargs)\n\n def isinstance(self, class_or_tuple: Union[type, Tuple[type, ...]]) -> bool:\n return isinstance(self, class_or_tuple)\n\n def type(self) -> Type[\"Object\"]:\n return type(self)\n\n @property\n def size(self) -> np.ndarray:\n raise NotImplementedError\n\n @property\n def bbox(self) -> np.ndarray:\n \"\"\"Returns the bounding box in the object frame.\n\n If the origin of the object is at its geometric center, this will be\n equivalent to `(-0.5 * self.size, 0.5 * self.size)`.\n\n Returns:\n An array of shape [2, 3] (min/max, x/y/z).\n \"\"\"\n raise NotImplementedError\n\n def convex_hulls(\n self, world_frame: bool = True, project_2d: bool = False\n ) -> List[np.ndarray]:\n \"\"\"Computes the object's convex hull.\n\n These hulls will be used for rough collision checking. By default,\n the vertices will be the 6 corners of the object's bounding box\n (`Object.bbox`).\n\n Args:\n world_frame: Whether to transform the vertices in world frame or\n leave them in object frame.\n project_2d: Whether to return the 2d convex hull.\n\n Returns:\n List of arrays of shape [_, 3] or [_, 2], where each array is a\n convex hull.\n \"\"\"\n pose = self.pose() if world_frame else None\n vertices = compute_bbox_vertices(self.bbox, pose, project_2d)\n\n return [vertices]\n\n def aabb(self) -> np.ndarray:\n \"\"\"Computes the axis-aligned bounding box from the object pose and size.\n\n This should be more accurate than `super().aabb()`, which gets the aabb\n from Pybullet. 
Pybullet returns an *enlarged* aabb for the object *base*\n link, while this returns the exact aabb for the entire object.\n\n Returns:\n An array of shape [2, 3] (min/max, x/y/z).\n \"\"\"\n vertices = np.concatenate(self.convex_hulls(world_frame=True), axis=0)\n xyz_min = vertices.min(axis=0)\n xyz_max = vertices.max(axis=0)\n\n return np.array([xyz_min, xyz_max])\n\n @property\n def shapes(self) -> Sequence[shapes.Shape]:\n return []\n\n def __str__(self) -> str:\n return self.name\n\n def __hash__(self) -> int:\n return hash(str(self))\n\n def __eq__(self, other) -> bool:\n return str(self) == str(other)" }, { "identifier": "Rack", "path": "generative_skill_chaining/envs/pybullet/table/objects.py", "snippet": "class Rack(Object):\n TOP_THICKNESS = 0.01\n LEG_THICKNESS = 0.01\n\n def __init__(\n self,\n physics_id: int,\n name: str,\n size: Union[List[float], np.ndarray],\n color: Union[List[float], np.ndarray],\n mass: float = 1.0,\n ):\n mass /= 7 # Divide mass among all 7 parts.\n top = shapes.Box(\n size=np.array([*size[:2], Rack.TOP_THICKNESS]),\n mass=mass,\n color=np.array(color),\n pose=math.Pose(\n pos=np.array([0.0, 0.0, -Rack.TOP_THICKNESS / 2]),\n quat=eigen.Quaterniond.identity().coeffs,\n ),\n )\n xy_legs = np.array([(x, y) for x in (-1, 1) for y in (-1, 1)]) * (\n (np.array(size[:2])[None, :] - Rack.LEG_THICKNESS) / 2\n )\n legs = [\n shapes.Box(\n size=np.array(\n [\n Rack.LEG_THICKNESS,\n Rack.LEG_THICKNESS,\n size[2] - Rack.TOP_THICKNESS - Rack.LEG_THICKNESS,\n ]\n ),\n mass=mass,\n color=np.array([0.0, 0.0, 0.0, 1.0]),\n pose=math.Pose(\n pos=np.array(\n [\n *xy_leg,\n -(size[2] + Rack.TOP_THICKNESS - Rack.LEG_THICKNESS) / 2,\n ]\n ),\n quat=eigen.Quaterniond.identity().coeffs,\n ),\n )\n for xy_leg in xy_legs\n ]\n stabilizers = [\n shapes.Box(\n size=np.array([size[0], Rack.LEG_THICKNESS, Rack.LEG_THICKNESS]),\n mass=mass,\n color=np.array([0.0, 0.0, 0.0, 1.0]),\n pose=math.Pose(\n pos=np.array([0.0, y_leg, -size[2] + Rack.LEG_THICKNESS / 2]),\n quat=eigen.Quaterniond.identity().coeffs,\n ),\n )\n for y_leg in xy_legs[:2, 1]\n ]\n self._shapes = [top, *legs, *stabilizers]\n body_id = shapes.create_body(\n self.shapes,\n link_parents=[0] * (len(legs) + len(stabilizers)),\n physics_id=physics_id,\n )\n\n super().__init__(\n physics_id=physics_id, body_id=body_id, name=name, is_static=mass == 0.0\n )\n\n self._state.box_size = np.array(size)\n self._bbox = np.array([-0.5 * self.size, 0.5 * self.size])\n self._bbox[0, 2] = -size[2]\n self._bbox[1, 2] = 0\n\n @property\n def size(self) -> np.ndarray:\n return self._state.box_size\n\n @property\n def bbox(self) -> np.ndarray:\n return self._bbox\n\n @property\n def shapes(self) -> Sequence[shapes.Shape]:\n return self._shapes" }, { "identifier": "math", "path": "generative_skill_chaining/envs/pybullet/sim/math.py", "snippet": "PYBULLET_STEPS_PER_SEC = 240\nPYBULLET_TIMESTEP = 1 / PYBULLET_STEPS_PER_SEC\nclass Pose:\n def from_eigen(pose: eigen.Isometry3d) -> \"Pose\":\n def to_eigen(self) -> eigen.Isometry3d:\ndef comb(n: int, r: int) -> int:" }, { "identifier": "Robot", "path": "generative_skill_chaining/envs/pybullet/sim/robot.py", "snippet": "class Robot(body.Body):\n \"\"\"User-facing robot interface.\"\"\"\n\n def __init__(\n self,\n physics_id: int,\n step_simulation_fn: Callable[[], None],\n urdf: str,\n arm_class: Union[str, Type[arm.Arm]],\n arm_kwargs: Dict[str, Any],\n gripper_class: Union[str, Type[gripper.Gripper]],\n gripper_kwargs: Dict[str, Any],\n ):\n \"\"\"Loads the robot from a urdf file.\n\n Args:\n 
physics_id: Pybullet physics client id.\n step_simulation_fn: Function to step simulation.\n urdf: Path to urdf.\n arm_class: In the generative_skill_chaining.envs.pybullet namespace.\n arm_kwargs: Arm kwargs from yaml config.\n gripper_class: In the generative_skill_chaining.envs.pybullet namespace.\n gripper_kwargs: Gripper kwargs from yaml config.\n \"\"\"\n body_id = p.loadURDF(\n fileName=urdf,\n useFixedBase=True,\n flags=p.URDF_USE_INERTIA_FROM_FILE\n | p.URDF_MAINTAIN_LINK_ORDER, # | p.URDF_MERGE_FIXED_LINKS\n physicsClientId=physics_id,\n )\n super().__init__(physics_id, body_id)\n\n if isinstance(arm_class, str):\n arm_class = configs.get_class(arm_class, pybullet)\n if isinstance(gripper_class, str):\n gripper_class = configs.get_class(gripper_class, pybullet)\n\n self._arm = arm_class(self.physics_id, self.body_id, **arm_kwargs)\n T_world_to_ee = dyn.cartesian_pose(self.arm.ab).inverse()\n self._gripper = gripper_class(\n self.physics_id, self.body_id, T_world_to_ee, **gripper_kwargs\n )\n\n self.step_simulation = step_simulation_fn\n\n @property\n def arm(self) -> arm.Arm:\n \"\"\"Controllable arm.\"\"\"\n return self._arm\n\n @property\n def gripper(self) -> gripper.Gripper:\n \"\"\"Controllable gripper.\"\"\"\n return self._gripper\n\n @property\n def home_pose(self) -> math.Pose:\n return self.arm.home_pose\n\n def reset(self) -> bool:\n \"\"\"Resets the robot by setting the arm to its home configuration and the gripper to the open position.\n\n This method disables torque control and bypasses simulation.\n \"\"\"\n self.gripper.reset()\n self.clear_load()\n status = self.arm.reset()\n if isinstance(self.arm, real.arm.Arm):\n status = self.goto_configuration(self.arm.q_home)\n return status\n\n def clear_load(self) -> None:\n \"\"\"Resets the end-effector load to the gripper inertia.\"\"\"\n if self.gripper.inertia is not None:\n self.arm.ab.replace_load(self.gripper.inertia)\n else:\n self.arm.ab.clear_load()\n\n def set_load(self, inertia: dyn.SpatialInertiad) -> None:\n \"\"\"Sets the end-effector load to the sum of the given inertia and gripper inertia.\"\"\"\n if self.gripper.inertia is not None:\n inertia = inertia + self.gripper.inertia\n self.arm.ab.replace_load(inertia)\n\n def get_state(self) -> Dict[str, Any]:\n return {\n \"arm\": self.arm.get_state(),\n \"gripper\": self.gripper.get_state(),\n \"load\": copy.deepcopy(self.arm.ab.inertia_load),\n }\n\n def set_state(self, state: Dict[str, Any]) -> None:\n self.arm.set_state(state[\"arm\"])\n self.gripper.set_state(state[\"gripper\"])\n idx_link, load_inertia = next(iter(state[\"load\"].items()))\n self.arm.ab.replace_load(load_inertia, idx_link)\n\n def goto_home(self) -> bool:\n \"\"\"Uses opspace control to go to the home position.\"\"\"\n return self.goto_pose(\n self.home_pose.pos,\n self.home_pose.quat,\n pos_gains=(64, 16),\n ori_gains=(64, 16),\n )\n\n def _is_colliding(\n self, body_id_a: int, body_id_b: int, link_id_a: Optional[int] = None\n ) -> bool:\n kwargs = {}\n if link_id_a is not None:\n kwargs[\"linkIndexA\"] = link_id_a\n contacts = p.getContactPoints(\n bodyA=body_id_a, bodyB=body_id_b, physicsClientId=self.physics_id, **kwargs\n )\n\n if not contacts:\n return False\n\n force = contacts[0][9]\n return force > 0.0\n\n def goto_pose(\n self,\n pos: Optional[np.ndarray] = None,\n quat: Optional[Union[eigen.Quaterniond, np.ndarray]] = None,\n pos_gains: Optional[Union[Tuple[float, float], np.ndarray]] = None,\n ori_gains: Optional[Union[Tuple[float, float], np.ndarray]] = None,\n timeout: 
Optional[float] = None,\n check_collisions: Sequence[int] = [],\n check_collision_freq: int = 10,\n ) -> bool:\n \"\"\"Uses opspace control to go to the desired pose.\n\n This method blocks until the command finishes or times out. A\n ControlException will be raised if the grasp controller is aborted.\n\n Args:\n pos: Optional position. Maintains current position if None.\n quat: Optional quaternion. Maintains current orientation if None.\n pos_gains: (kp, kv) gains or [3 x 2] array of xyz gains.\n ori_gains: (kp, kv) gains or [3 x 2] array of xyz gains.\n timeout: Uses the timeout specified in the yaml arm config if None.\n check_collisions: Raise an exception if the gripper or grasped\n object collides with any of the body_ids in this list.\n check_collision_freq: Iteration interval with which to check\n collisions.\n Returns:\n True if the grasp controller converges to the desired position or\n zero velocity, false if the command times out.\n \"\"\"\n if check_collisions:\n body_ids_a = [self.body_id] * len(self.gripper.finger_links)\n link_ids_a: List[Optional[int]] = list(self.gripper.finger_links)\n grasp_body_id = self.gripper._gripper_state.grasp_body_id\n if grasp_body_id is not None:\n body_ids_a.append(grasp_body_id)\n link_ids_a.append(None)\n\n # Set the pose goal.\n self.arm.set_pose_goal(pos, quat, pos_gains, ori_gains, timeout)\n\n # Simulate until the pose goal is reached.\n status = self.arm.update_torques()\n self.gripper.update_torques()\n iter = 0\n while status == articulated_body.ControlStatus.IN_PROGRESS:\n self.step_simulation()\n status = self.arm.update_torques()\n self.gripper.update_torques()\n iter += 1\n\n if isinstance(self.arm, real.arm.Arm):\n continue\n\n if not check_collisions or iter % check_collision_freq != 0:\n continue\n\n # Terminate early if there are collisions with the gripper fingers\n # or grasped object.\n for body_id_a, link_id_a in zip(body_ids_a, link_ids_a):\n for body_id_b in check_collisions:\n if self._is_colliding(body_id_a, body_id_b, link_id_a):\n raise ControlException(\n f\"Robot.goto_pose({pos}, {quat}): Collision {body_id_a}:{link_id_a}, {body_id_b}\"\n )\n # print(\"Robot.goto_pose:\", pos, quat, status)\n\n if status == articulated_body.ControlStatus.ABORTED:\n raise ControlException(f\"Robot.goto_pose({pos}, {quat}): Singularity\")\n\n return status in (\n articulated_body.ControlStatus.POS_CONVERGED,\n articulated_body.ControlStatus.VEL_CONVERGED,\n )\n\n def goto_configuration(self, q: np.ndarray) -> bool:\n \"\"\"Sets the robot to the desired joint configuration.\n\n Args:\n q: Joint configuration.\n Returns:\n True if the controller converges to the desired position or zero\n velocity, false if the command times out.\n \"\"\"\n # Set the configuration goal.\n self.arm.set_configuration_goal(q)\n\n # Simulate until the pose goal is reached.\n status = self.arm.update_torques()\n self.gripper.update_torques()\n while status == articulated_body.ControlStatus.IN_PROGRESS:\n self.step_simulation()\n status = self.arm.update_torques()\n self.gripper.update_torques()\n\n return status in (\n articulated_body.ControlStatus.POS_CONVERGED,\n articulated_body.ControlStatus.VEL_CONVERGED,\n )\n\n def grasp(\n self,\n command: float,\n pos_gains: Optional[Union[Tuple[float, float], np.ndarray]] = None,\n timeout: Optional[float] = None,\n ) -> bool:\n \"\"\"Sets the gripper to the desired grasp (0.0 open, 1.0 closed).\n\n This method blocks until the command finishes or times out. 
A\n ControlException will be raised if the grasp controller is aborted.\n\n Any existing grasp constraints will be cleared and no new ones will be\n created. Use `Robot.grasp_object()` to create a grasp constraint.\n\n Args:\n command: Desired grasp (range from 0.0 open to 1.0 closed).\n pos_gains: kp gains (only used for sim).\n timeout: Uses the timeout specified in the yaml gripper config if None.\n Returns:\n True if the grasp controller converges to the desired position or\n zero velocity, false if the command times out.\n \"\"\"\n # Clear any existing grasp constraints.\n self.gripper.remove_grasp_constraint()\n self.clear_load()\n\n # Set the new grasp command.\n self.gripper.set_grasp(command, pos_gains, timeout)\n\n # Simulate until the grasp command finishes.\n status = self.gripper.update_torques()\n while status == articulated_body.ControlStatus.IN_PROGRESS:\n self.arm.update_torques()\n self.step_simulation()\n status = self.gripper.update_torques()\n # print(\"Robot.grasp:\", command, status)\n\n if status == articulated_body.ControlStatus.ABORTED:\n raise ControlException(f\"Robot.grasp({command})\")\n\n return status in (\n articulated_body.ControlStatus.POS_CONVERGED,\n articulated_body.ControlStatus.VEL_CONVERGED,\n )\n\n def grasp_object(\n self,\n obj: body.Body,\n pos_gains: Optional[Union[Tuple[float, float], np.ndarray]] = None,\n timeout: Optional[float] = None,\n realistic: bool = True,\n ) -> bool:\n \"\"\"Attempts to grasp an object and attaches the object to the gripper via a pose constraint.\n\n This method blocks until the command finishes or times out. A\n ControlException will be raised if the grasp controller is aborted.\n\n Args:\n command: Desired grasp (range from 0.0 open to 1.0 closed).\n pos_gains: kp gains (only used for sim).\n timeout: Uses the timeout specified in the yaml gripper config if None.\n realistic: If false, creates a pose constraint regardless of whether\n the object is in a secure grasp.\n Returns:\n True if the object is successfully grasped, false otherwise.\n \"\"\"\n if realistic:\n self.grasp(1, pos_gains, timeout)\n\n # Wait for grasped object to settle.\n status = self.gripper.update_torques()\n while (\n status\n in (\n articulated_body.ControlStatus.VEL_CONVERGED,\n articulated_body.ControlStatus.IN_PROGRESS,\n )\n and self.gripper._gripper_state.iter_timeout >= 0\n and (obj.twist() > 0.001).any()\n ):\n self.arm.update_torques()\n status = self.gripper.update_torques()\n self.step_simulation()\n\n # Make sure fingers aren't fully closed.\n if status == articulated_body.ControlStatus.POS_CONVERGED:\n return False\n\n # Lock the object in place with a grasp constraint.\n if not self.gripper.create_grasp_constraint(obj.body_id, realistic):\n return False\n\n # Add object load.\n T_obj_to_world = obj.pose().to_eigen()\n T_ee_to_world = dyn.cartesian_pose(self.arm.ab)\n T_obj_to_ee = T_ee_to_world.inverse() * T_obj_to_world\n self.set_load(obj.inertia * T_obj_to_ee)\n\n return True" } ]
import dataclasses import random import numpy as np import pybullet as p import symbolic from typing import Optional, Dict, List, Sequence, Tuple, Type from ctrlutils import eigen from shapely.geometry import Polygon, LineString from generative_skill_chaining.envs.pybullet.table import primitive_actions, utils from generative_skill_chaining.envs.pybullet.table.objects import Box, Hook, Null, Object, Rack from generative_skill_chaining.envs.pybullet.sim import math from generative_skill_chaining.envs.pybullet.sim.robot import Robot
11,347
return True def get_bounds_and_margin( self, child_obj: Object, parent_obj: Object, state: Sequence[Predicate], margin: np.ndarray, ) -> Tuple[np.ndarray, np.ndarray]: assert child_obj.name == self.args[0] and parent_obj.name == "table" margin = TableBounds.scale_margin(child_obj, margin) bounds = parent_obj.aabb()[:, :2] xy_min, xy_max = bounds xy_min[0] = utils.TABLE_CONSTRAINTS["obstruction_x_min"] xy_max[0] = utils.TABLE_CONSTRAINTS["workspace_radius"] xy_min += margin xy_max -= margin return bounds, margin class BeyondWorkspace(Predicate, TableBounds): """Unary predicate ensuring than an object is in beyond the robot workspace.""" def value( self, robot: Robot, objects: Dict[str, Object], state: Sequence[Predicate] ) -> bool: obj = self.get_arg_objects(objects)[0] if obj.isinstance(Null): return True distance = float(np.linalg.norm(obj.pose().pos[:2])) if not utils.is_beyondworkspace(obj=obj, distance=distance): return False dbprint(f"{self}.value():", False, "- distance:", distance) return True def get_bounds_and_margin( self, child_obj: Object, parent_obj: Object, state: Sequence[Predicate], margin: np.ndarray, ) -> Tuple[np.ndarray, np.ndarray]: """Returns the minimum and maximum x-y bounds outside the workspace.""" assert child_obj.name == self.args[0] and parent_obj.name == "table" zone = type(self).__name__.lower() if f"aligned({child_obj})" in state: theta = Aligned.sample_angle(obj=child_obj, zone=zone) child_obj.set_pose(utils.compute_object_pose(child_obj, theta)) margin = utils.compute_margins(child_obj) poslimit = TableBounds.get_poslimit(child_obj, state) if poslimit is not None: return poslimit.bounds(child_obj)[zone], margin bounds = parent_obj.aabb()[:, :2] xy_min, xy_max = bounds r = utils.TABLE_CONSTRAINTS["workspace_radius"] xy_min[0] = r * np.cos(np.arcsin(0.5 * (xy_max[1] - xy_min[1]) / r)) xy_min += margin xy_max -= margin return bounds, margin class InOodZone(Predicate, TableBounds): """Unary predicate ensuring than an object is in beyond the robot workspace.""" def value( self, robot: Robot, objects: Dict[str, Object], state: Sequence[Predicate] ) -> bool: obj = self.get_arg_objects(objects)[0] if obj.isinstance(Null): return True return True def get_bounds_and_margin( self, child_obj: Object, parent_obj: Object, state: Sequence[Predicate], margin: np.ndarray, ) -> Tuple[np.ndarray, np.ndarray]: """Returns the minimum and maximum x-y bounds outside the workspace.""" assert child_obj.name == self.args[0] and parent_obj.name == "table" bounds = parent_obj.aabb()[:, :2] xy_min, xy_max = bounds xy_min[0] = bounds[0, 0] xy_max[0] = utils.TABLE_CONSTRAINTS["table_x_min"] xy_min += margin xy_max -= margin return bounds, margin class Inhand(Predicate): MAX_GRASP_ATTEMPTS = 1 def sample( self, robot: Robot, objects: Dict[str, Object], state: Sequence[Predicate] ) -> bool: """Samples a geometric grounding of the InHand(a) predicate.""" obj = self.get_arg_objects(objects)[0] if obj.is_static: return True # Generate grasp pose. for i in range(Inhand.MAX_GRASP_ATTEMPTS): grasp_pose = self.generate_grasp_pose( obj, handlegrasp=f"handlegrasp({obj})" in state, upperhandlegrasp=f"upperhandlegrasp({obj})" in state, )
dbprint = lambda *args: None # noqa # dbprint = print @dataclasses.dataclass class Predicate: args: List[str] @classmethod def create(cls, proposition: str) -> "Predicate": predicate, args = symbolic.parse_proposition(proposition) predicate_classes = { name.lower(): predicate_class for name, predicate_class in globals().items() } predicate_class = predicate_classes[predicate] return predicate_class(args) def sample( self, robot: Robot, objects: Dict[str, Object], state: Sequence["Predicate"] ) -> bool: """Generates a geometric grounding of a predicate.""" return True def value( self, robot: Robot, objects: Dict[str, Object], state: Sequence["Predicate"] ) -> bool: """Evaluates to True if the geometrically grounded predicate is satisfied.""" return True def get_arg_objects(self, objects: Dict[str, Object]) -> List[Object]: return [objects[arg] for arg in self.args] def __str__(self) -> str: return f"{type(self).__name__.lower()}({', '.join(self.args)})" def __hash__(self) -> int: return hash(str(self)) def __eq__(self, other) -> bool: return str(self) == str(other) class HandleGrasp(Predicate): """Unary predicate enforcing a handle grasp towards the tail end on a hook object.""" pass class UpperHandleGrasp(Predicate): """Unary predicate enforcing a handle grasp towards the head on a hook object.""" pass class Free(Predicate): """Unary predicate enforcing that no top-down occlusions exist on the object.""" DISTANCE_MIN: Dict[Tuple[Type[Object], Type[Object]], float] = { (Box, Box): 0.05, (Box, Hook): 0.05, (Box, Rack): 0.1, (Hook, Rack): 0.1, } def value( self, robot: Robot, objects: Dict[str, Object], state: Sequence[Predicate] ) -> bool: child_obj = self.get_arg_objects(objects)[0] if child_obj.isinstance(Null): return True for obj in objects.values(): if f"inhand({obj})" in state or obj.isinstance(Null) or obj == child_obj: continue if utils.is_under(child_obj, obj): dbprint(f"{self}.value():", False, f"{child_obj} under {obj}") return False obj_a, obj_b = sorted( (child_obj.type(), obj.type()), key=lambda x: x.__name__ ) try: min_distance = Free.DISTANCE_MIN[(obj_a, obj_b)] except KeyError: continue if ( (obj.isinstance(Rack) and f"beyondworkspace({obj})" in state) or f"infront({child_obj}, rack)" in state or f"infront({obj}, rack)" in state ): min_distance = 0.04 if utils.is_within_distance( child_obj, obj, min_distance, obj.physics_id ) and not utils.is_above(child_obj, obj): dbprint( f"{self}.value():", False, f"{child_obj} and {obj} are within min distance", ) return False return True class Tippable(Predicate): """Unary predicate admitting non-upright configurations of an object.""" pass class TableBounds: """Predicate that specifies minimum and maximum x-y bounds on the table.""" MARGIN_SCALE: Dict[Type[Object], float] = {Hook: 0.25} def get_bounds_and_margin( self, child_obj: Object, parent_obj: Object, state: Sequence[Predicate], margin: np.ndarray, ) -> Tuple[np.ndarray, np.ndarray]: """Returns the minimum and maximum x-y bounds on the table as well as the modified margins.""" assert parent_obj.name == "table" zone = type(self).__name__.lower() poslimit = TableBounds.get_poslimit(child_obj, state) if poslimit is not None: pos_bounds = poslimit.bounds(child_obj) zone = random.choice(list(pos_bounds.keys())) # Compute poslimit zone-specific angle if f"aligned({child_obj})" in state: theta = Aligned.sample_angle(obj=child_obj, zone=zone) child_obj.set_pose(utils.compute_object_pose(child_obj, theta)) margin = utils.compute_margins(child_obj) return pos_bounds[zone], margin elif 
f"aligned({child_obj})" in state: theta = Aligned.sample_angle(obj=child_obj, zone=zone) child_obj.set_pose(utils.compute_object_pose(child_obj, theta)) margin = utils.compute_margins(child_obj) bounds = parent_obj.aabb()[:, :2] xy_min, xy_max = bounds xy_min[0] = utils.TABLE_CONSTRAINTS["table_x_min"] xy_min += margin xy_max -= margin return bounds, margin @staticmethod def get_poslimit( obj: Object, state: Sequence[Predicate], ) -> Optional["PosLimit"]: try: idx_prop = state.index(f"poslimit({obj})") except ValueError: return None prop = state[idx_prop] assert isinstance(prop, PosLimit) return prop @classmethod def get_zone( cls, obj: Object, state: Sequence[Predicate], ) -> Optional["TableBounds"]: zones = [ prop for prop in state if isinstance(prop, TableBounds) and prop.args[0] == obj ] if not zones and f"on({obj}, table)" in state: return cls() elif len(zones) == 1: return zones[0] elif len(zones) != 1: raise ValueError(f"{obj} cannot be in multiple zones: {zones}") return None @staticmethod def scale_margin(obj: Object, margins: np.ndarray) -> np.ndarray: try: bounds = TableBounds.MARGIN_SCALE[obj.type()] except KeyError: return margins return bounds * margins class Aligned(Predicate): """Unary predicate enforcing that the object and world coordinate frames align.""" ANGLE_EPS: float = 0.002 ANGLE_STD: float = 0.05 ANGLE_ABS: float = 0.1 ZONE_ANGLES: Dict[Tuple[Type[Object], Optional[str]], float] = { (Rack, "inworkspace"): 0.5 * np.pi, (Rack, "beyondworkspace"): 0.0, } # def value( # self, robot: Robot, objects: Dict[str, Object], state: Sequence[Predicate] # ) -> bool: # obj = self.get_arg_objects(objects)[0] # if obj.isinstance(Null): # return True # try: # zone = TableBounds.get_zone(obj=obj, state=state) # angle_mean = Aligned.ZONE_ANGLES[(obj.type(), type(zone).__name__.lower())] # if ( # angle_mean - Aligned.ANGLE_ABS < -np.pi # or angle_mean + Aligned.ANGLE_ABS > np.pi # ): # raise ValueError("Cannot recover wrapped angle.") # except KeyError: # angle_mean = 0.0 # angle = eigen.AngleAxisd(eigen.Quaterniond(obj.pose().quat)).angle - angle_mean # if not ( # Aligned.ANGLE_EPS <= abs(angle) <= Aligned.ANGLE_ABS # and utils.is_upright(obj) # ): # dbprint(f"{self}.value():", False) # return False # return True @staticmethod def sample_angle(obj: Object, zone: Optional[str] = None) -> float: angle = 0.0 while abs(angle) < Aligned.ANGLE_EPS: angle = np.random.randn() * Aligned.ANGLE_STD try: angle_mu = Aligned.ZONE_ANGLES[(obj.type(), zone)] except KeyError: angle_mu = 0.0 angle = np.clip( angle + angle_mu, angle_mu - Aligned.ANGLE_ABS, angle_mu + Aligned.ANGLE_ABS, ) angle = (angle + np.pi) % (2 * np.pi) - np.pi return angle class PosLimit(Predicate): """Unary predicate limiting the placement positions of particular object types.""" POS_EPS: Dict[Type[Object], float] = {Rack: 0.01} POS_SPEC: Dict[Type[Object], Dict[str, np.ndarray]] = { Rack: { "inworkspace": np.array([0.44, -0.33]), "beyondworkspace": np.array([0.82, 0.00]), } } def bounds(self, child_obj: Object) -> Dict[str, np.ndarray]: assert child_obj.name == self.args[0] if child_obj.type() not in PosLimit.POS_SPEC: raise ValueError(f"Positions not specified for {child_obj.type()}") eps = PosLimit.POS_EPS[child_obj.type()] xys = PosLimit.POS_SPEC[child_obj.type()] bounds = {k: np.array([xy - eps, xy + eps]) for k, xy in xys.items()} return bounds class InWorkspace(Predicate, TableBounds): """Unary predicate ensuring than an object is in the robot workspace.""" def value( self, robot: Robot, objects: Dict[str, Object], state: 
Sequence[Predicate] ) -> bool: obj = self.get_arg_objects(objects)[0] if obj.isinstance((Null, Rack)): # Rack is in workspace by construction. return True obj_pos = obj.pose().pos[:2] distance = float(np.linalg.norm(obj_pos)) if not utils.is_inworkspace(obj_pos=obj_pos, distance=distance): dbprint( f"{self}.value():", False, "- pos:", obj_pos[:2], "distance:", distance ) return False return True def get_bounds_and_margin( self, child_obj: Object, parent_obj: Object, state: Sequence[Predicate], margin: np.ndarray, ) -> Tuple[np.ndarray, np.ndarray]: """Returns the minimum and maximum x-y bounds inside the workspace.""" assert child_obj.name == self.args[0] and parent_obj.name == "table" zone = type(self).__name__.lower() if f"aligned({child_obj})" in state: theta = Aligned.sample_angle(obj=child_obj, zone=zone) child_obj.set_pose(utils.compute_object_pose(child_obj, theta)) margin = utils.compute_margins(child_obj) poslimit = TableBounds.get_poslimit(child_obj, state) if poslimit is not None: return poslimit.bounds(child_obj)[zone], margin bounds = parent_obj.aabb()[:, :2] xy_min, xy_max = bounds xy_min[0] = utils.TABLE_CONSTRAINTS["workspace_x_min"] xy_max[0] = utils.TABLE_CONSTRAINTS["workspace_radius"] xy_min += margin xy_max -= margin return bounds, margin class InCollisionZone(Predicate, TableBounds): """Unary predicate ensuring the object is in the collision zone.""" def value( self, robot: Robot, objects: Dict[str, Object], state: Sequence[Predicate] ) -> bool: obj = self.get_arg_objects(objects)[0] if obj.isinstance(Null): return True obj_pos = obj.pose().pos[:2] distance = float(np.linalg.norm(obj_pos)) if not ( utils.TABLE_CONSTRAINTS["workspace_x_min"] <= obj.pose().pos[0] < utils.TABLE_CONSTRAINTS["operational_x_min"] and distance < utils.TABLE_CONSTRAINTS["workspace_radius"] ): dbprint(f"{self}.value():", False, "- pos:", obj_pos, "distance:", distance) return False return True def get_bounds_and_margin( self, child_obj: Object, parent_obj: Object, state: Sequence[Predicate], margin: np.ndarray, ) -> Tuple[np.ndarray, np.ndarray]: assert child_obj.name == self.args[0] and parent_obj.name == "table" margin = TableBounds.scale_margin(child_obj, margin) bounds = parent_obj.aabb()[:, :2] xy_min, xy_max = bounds xy_min[0] = utils.TABLE_CONSTRAINTS["workspace_x_min"] xy_max[0] = utils.TABLE_CONSTRAINTS["operational_x_min"] xy_min += margin xy_max -= margin return bounds, margin class InOperationalZone(Predicate, TableBounds): """Unary predicate ensuring the object is in the operational zone.""" def value( self, robot: Robot, objects: Dict[str, Object], state: Sequence[Predicate] ) -> bool: obj = self.get_arg_objects(objects)[0] if obj.isinstance(Null): return True obj_pos = obj.pose().pos[:2] distance = float(np.linalg.norm(obj_pos)) if not ( utils.TABLE_CONSTRAINTS["operational_x_min"] <= obj_pos[0] < utils.TABLE_CONSTRAINTS["operational_x_max"] and distance < utils.TABLE_CONSTRAINTS["workspace_radius"] ): dbprint(f"{self}.value():", False, "- pos:", obj_pos, "distance:", distance) return False return True def get_bounds_and_margin( self, child_obj: Object, parent_obj: Object, state: Sequence[Predicate], margin: np.ndarray, ) -> Tuple[np.ndarray, np.ndarray]: assert child_obj.name == self.args[0] and parent_obj.name == "table" margin = TableBounds.scale_margin(child_obj, margin) bounds = parent_obj.aabb()[:, :2] xy_min, xy_max = bounds xy_min[0] = utils.TABLE_CONSTRAINTS["operational_x_min"] xy_max[0] = utils.TABLE_CONSTRAINTS["operational_x_max"] xy_min += margin xy_max -= margin 
return bounds, margin class InObstructionZone(Predicate, TableBounds): """Unary predicate ensuring the object is in the obstruction zone.""" def value( self, robot: Robot, objects: Dict[str, Object], state: Sequence[Predicate] ) -> bool: obj = self.get_arg_objects(objects)[0] if obj.isinstance(Null): return True obj_pos = obj.pose().pos[:2] distance = float(np.linalg.norm(obj_pos)) if not ( obj_pos[0] >= utils.TABLE_CONSTRAINTS["obstruction_x_min"] and distance < utils.TABLE_CONSTRAINTS["workspace_radius"] ): dbprint(f"{self}.value():", False, "- pos:", obj_pos, "distance:", distance) return False return True def get_bounds_and_margin( self, child_obj: Object, parent_obj: Object, state: Sequence[Predicate], margin: np.ndarray, ) -> Tuple[np.ndarray, np.ndarray]: assert child_obj.name == self.args[0] and parent_obj.name == "table" margin = TableBounds.scale_margin(child_obj, margin) bounds = parent_obj.aabb()[:, :2] xy_min, xy_max = bounds xy_min[0] = utils.TABLE_CONSTRAINTS["obstruction_x_min"] xy_max[0] = utils.TABLE_CONSTRAINTS["workspace_radius"] xy_min += margin xy_max -= margin return bounds, margin class BeyondWorkspace(Predicate, TableBounds): """Unary predicate ensuring than an object is in beyond the robot workspace.""" def value( self, robot: Robot, objects: Dict[str, Object], state: Sequence[Predicate] ) -> bool: obj = self.get_arg_objects(objects)[0] if obj.isinstance(Null): return True distance = float(np.linalg.norm(obj.pose().pos[:2])) if not utils.is_beyondworkspace(obj=obj, distance=distance): return False dbprint(f"{self}.value():", False, "- distance:", distance) return True def get_bounds_and_margin( self, child_obj: Object, parent_obj: Object, state: Sequence[Predicate], margin: np.ndarray, ) -> Tuple[np.ndarray, np.ndarray]: """Returns the minimum and maximum x-y bounds outside the workspace.""" assert child_obj.name == self.args[0] and parent_obj.name == "table" zone = type(self).__name__.lower() if f"aligned({child_obj})" in state: theta = Aligned.sample_angle(obj=child_obj, zone=zone) child_obj.set_pose(utils.compute_object_pose(child_obj, theta)) margin = utils.compute_margins(child_obj) poslimit = TableBounds.get_poslimit(child_obj, state) if poslimit is not None: return poslimit.bounds(child_obj)[zone], margin bounds = parent_obj.aabb()[:, :2] xy_min, xy_max = bounds r = utils.TABLE_CONSTRAINTS["workspace_radius"] xy_min[0] = r * np.cos(np.arcsin(0.5 * (xy_max[1] - xy_min[1]) / r)) xy_min += margin xy_max -= margin return bounds, margin class InOodZone(Predicate, TableBounds): """Unary predicate ensuring than an object is in beyond the robot workspace.""" def value( self, robot: Robot, objects: Dict[str, Object], state: Sequence[Predicate] ) -> bool: obj = self.get_arg_objects(objects)[0] if obj.isinstance(Null): return True return True def get_bounds_and_margin( self, child_obj: Object, parent_obj: Object, state: Sequence[Predicate], margin: np.ndarray, ) -> Tuple[np.ndarray, np.ndarray]: """Returns the minimum and maximum x-y bounds outside the workspace.""" assert child_obj.name == self.args[0] and parent_obj.name == "table" bounds = parent_obj.aabb()[:, :2] xy_min, xy_max = bounds xy_min[0] = bounds[0, 0] xy_max[0] = utils.TABLE_CONSTRAINTS["table_x_min"] xy_min += margin xy_max -= margin return bounds, margin class Inhand(Predicate): MAX_GRASP_ATTEMPTS = 1 def sample( self, robot: Robot, objects: Dict[str, Object], state: Sequence[Predicate] ) -> bool: """Samples a geometric grounding of the InHand(a) predicate.""" obj = self.get_arg_objects(objects)[0] if 
obj.is_static: return True # Generate grasp pose. for i in range(Inhand.MAX_GRASP_ATTEMPTS): grasp_pose = self.generate_grasp_pose( obj, handlegrasp=f"handlegrasp({obj})" in state, upperhandlegrasp=f"upperhandlegrasp({obj})" in state, )
obj_pose = math.Pose.from_eigen(grasp_pose.to_eigen().inverse())
7
2023-10-16 00:22:40+00:00
16k
akashgreninja/GreSec
backend/venv/lib/python3.10/site-packages/numpy/array_api/linalg.py
[ { "identifier": "_floating_dtypes", "path": "backend/venv/lib/python3.10/site-packages/numpy/array_api/_dtypes.py", "snippet": "def _result_type(type1, type2):" }, { "identifier": "reshape", "path": "backend/venv/lib/python3.10/site-packages/numpy/array_api/_manipulation_functions.py", "snippet": "def reshape(x: Array, \n /, \n shape: Tuple[int, ...],\n *,\n copy: Optional[Bool] = None) -> Array:\n \"\"\"\n Array API compatible wrapper for :py:func:`np.reshape <numpy.reshape>`.\n\n See its docstring for more information.\n \"\"\"\n\n data = x._array\n if copy:\n data = np.copy(data)\n\n reshaped = np.reshape(data, shape)\n\n if copy is False and not np.shares_memory(data, reshaped):\n raise AttributeError(\"Incompatible shape for in-place modification.\")\n\n return Array._new(reshaped)" }, { "identifier": "Array", "path": "backend/venv/lib/python3.10/site-packages/numpy/array_api/_array_object.py", "snippet": "class Array:\n \"\"\"\n n-d array object for the array API namespace.\n\n See the docstring of :py:obj:`np.ndarray <numpy.ndarray>` for more\n information.\n\n This is a wrapper around numpy.ndarray that restricts the usage to only\n those things that are required by the array API namespace. Note,\n attributes on this object that start with a single underscore are not part\n of the API specification and should only be used internally. This object\n should not be constructed directly. Rather, use one of the creation\n functions, such as asarray().\n\n \"\"\"\n _array: np.ndarray[Any, Any]\n\n # Use a custom constructor instead of __init__, as manually initializing\n # this class is not supported API.\n @classmethod\n def _new(cls, x, /):\n \"\"\"\n This is a private method for initializing the array API Array\n object.\n\n Functions outside of the array_api submodule should not use this\n method. Use one of the creation functions instead, such as\n ``asarray``.\n\n \"\"\"\n obj = super().__new__(cls)\n # Note: The spec does not have array scalars, only 0-D arrays.\n if isinstance(x, np.generic):\n # Convert the array scalar to a 0-D array\n x = np.asarray(x)\n if x.dtype not in _all_dtypes:\n raise TypeError(\n f\"The array_api namespace does not support the dtype '{x.dtype}'\"\n )\n obj._array = x\n return obj\n\n # Prevent Array() from working\n def __new__(cls, *args, **kwargs):\n raise TypeError(\n \"The array_api Array object should not be instantiated directly. Use an array creation function, such as asarray(), instead.\"\n )\n\n # These functions are not required by the spec, but are implemented for\n # the sake of usability.\n\n def __str__(self: Array, /) -> str:\n \"\"\"\n Performs the operation __str__.\n \"\"\"\n return self._array.__str__().replace(\"array\", \"Array\")\n\n def __repr__(self: Array, /) -> str:\n \"\"\"\n Performs the operation __repr__.\n \"\"\"\n suffix = f\", dtype={self.dtype.name})\"\n if 0 in self.shape:\n prefix = \"empty(\"\n mid = str(self.shape)\n else:\n prefix = \"Array(\"\n mid = np.array2string(self._array, separator=', ', prefix=prefix, suffix=suffix)\n return prefix + mid + suffix\n\n # This function is not required by the spec, but we implement it here for\n # convenience so that np.asarray(np.array_api.Array) will work.\n def __array__(self, dtype: None | np.dtype[Any] = None) -> npt.NDArray[Any]:\n \"\"\"\n Warning: this method is NOT part of the array API spec. 
Implementers\n of other libraries need not include it, and users should not assume it\n will be present in other implementations.\n\n \"\"\"\n return np.asarray(self._array, dtype=dtype)\n\n # These are various helper functions to make the array behavior match the\n # spec in places where it either deviates from or is more strict than\n # NumPy behavior\n\n def _check_allowed_dtypes(self, other: bool | int | float | Array, dtype_category: str, op: str) -> Array:\n \"\"\"\n Helper function for operators to only allow specific input dtypes\n\n Use like\n\n other = self._check_allowed_dtypes(other, 'numeric', '__add__')\n if other is NotImplemented:\n return other\n \"\"\"\n\n if self.dtype not in _dtype_categories[dtype_category]:\n raise TypeError(f\"Only {dtype_category} dtypes are allowed in {op}\")\n if isinstance(other, (int, complex, float, bool)):\n other = self._promote_scalar(other)\n elif isinstance(other, Array):\n if other.dtype not in _dtype_categories[dtype_category]:\n raise TypeError(f\"Only {dtype_category} dtypes are allowed in {op}\")\n else:\n return NotImplemented\n\n # This will raise TypeError for type combinations that are not allowed\n # to promote in the spec (even if the NumPy array operator would\n # promote them).\n res_dtype = _result_type(self.dtype, other.dtype)\n if op.startswith(\"__i\"):\n # Note: NumPy will allow in-place operators in some cases where\n # the type promoted operator does not match the left-hand side\n # operand. For example,\n\n # >>> a = np.array(1, dtype=np.int8)\n # >>> a += np.array(1, dtype=np.int16)\n\n # The spec explicitly disallows this.\n if res_dtype != self.dtype:\n raise TypeError(\n f\"Cannot perform {op} with dtypes {self.dtype} and {other.dtype}\"\n )\n\n return other\n\n # Helper function to match the type promotion rules in the spec\n def _promote_scalar(self, scalar):\n \"\"\"\n Returns a promoted version of a Python scalar appropriate for use with\n operations on self.\n\n This may raise an OverflowError in cases where the scalar is an\n integer that is too large to fit in a NumPy integer dtype, or\n TypeError when the scalar type is incompatible with the dtype of self.\n \"\"\"\n # Note: Only Python scalar types that match the array dtype are\n # allowed.\n if isinstance(scalar, bool):\n if self.dtype not in _boolean_dtypes:\n raise TypeError(\n \"Python bool scalars can only be promoted with bool arrays\"\n )\n elif isinstance(scalar, int):\n if self.dtype in _boolean_dtypes:\n raise TypeError(\n \"Python int scalars cannot be promoted with bool arrays\"\n )\n if self.dtype in _integer_dtypes:\n info = np.iinfo(self.dtype)\n if not (info.min <= scalar <= info.max):\n raise OverflowError(\n \"Python int scalars must be within the bounds of the dtype for integer arrays\"\n )\n # int + array(floating) is allowed\n elif isinstance(scalar, float):\n if self.dtype not in _floating_dtypes:\n raise TypeError(\n \"Python float scalars can only be promoted with floating-point arrays.\"\n )\n elif isinstance(scalar, complex):\n if self.dtype not in _complex_floating_dtypes:\n raise TypeError(\n \"Python complex scalars can only be promoted with complex floating-point arrays.\"\n )\n else:\n raise TypeError(\"'scalar' must be a Python scalar\")\n\n # Note: scalars are unconditionally cast to the same dtype as the\n # array.\n\n # Note: the spec only specifies integer-dtype/int promotion\n # behavior for integers within the bounds of the integer dtype.\n # Outside of those bounds we use the default NumPy behavior (either\n # cast 
or raise OverflowError).\n return Array._new(np.array(scalar, self.dtype))\n\n @staticmethod\n def _normalize_two_args(x1, x2) -> Tuple[Array, Array]:\n \"\"\"\n Normalize inputs to two arg functions to fix type promotion rules\n\n NumPy deviates from the spec type promotion rules in cases where one\n argument is 0-dimensional and the other is not. For example:\n\n >>> import numpy as np\n >>> a = np.array([1.0], dtype=np.float32)\n >>> b = np.array(1.0, dtype=np.float64)\n >>> np.add(a, b) # The spec says this should be float64\n array([2.], dtype=float32)\n\n To fix this, we add a dimension to the 0-dimension array before passing it\n through. This works because a dimension would be added anyway from\n broadcasting, so the resulting shape is the same, but this prevents NumPy\n from not promoting the dtype.\n \"\"\"\n # Another option would be to use signature=(x1.dtype, x2.dtype, None),\n # but that only works for ufuncs, so we would have to call the ufuncs\n # directly in the operator methods. One should also note that this\n # sort of trick wouldn't work for functions like searchsorted, which\n # don't do normal broadcasting, but there aren't any functions like\n # that in the array API namespace.\n if x1.ndim == 0 and x2.ndim != 0:\n # The _array[None] workaround was chosen because it is relatively\n # performant. broadcast_to(x1._array, x2.shape) is much slower. We\n # could also manually type promote x2, but that is more complicated\n # and about the same performance as this.\n x1 = Array._new(x1._array[None])\n elif x2.ndim == 0 and x1.ndim != 0:\n x2 = Array._new(x2._array[None])\n return (x1, x2)\n\n # Note: A large fraction of allowed indices are disallowed here (see the\n # docstring below)\n def _validate_index(self, key):\n \"\"\"\n Validate an index according to the array API.\n\n The array API specification only requires a subset of indices that are\n supported by NumPy. This function will reject any index that is\n allowed by NumPy but not required by the array API specification. We\n always raise ``IndexError`` on such indices (the spec does not require\n any specific behavior on them, but this makes the NumPy array API\n namespace a minimal implementation of the spec). See\n https://data-apis.org/array-api/latest/API_specification/indexing.html\n for the full list of required indexing behavior\n\n This function raises IndexError if the index ``key`` is invalid. It\n only raises ``IndexError`` on indices that are not already rejected by\n NumPy, as NumPy will already raise the appropriate error on such\n indices. ``shape`` may be None, in which case, only cases that are\n independent of the array shape are checked.\n\n The following cases are allowed by NumPy, but not specified by the array\n API specification:\n\n - Indices to not include an implicit ellipsis at the end. That is,\n every axis of an array must be explicitly indexed or an ellipsis\n included. This behaviour is sometimes referred to as flat indexing.\n\n - The start and stop of a slice may not be out of bounds. 
In\n particular, for a slice ``i:j:k`` on an axis of size ``n``, only the\n following are allowed:\n\n - ``i`` or ``j`` omitted (``None``).\n - ``-n <= i <= max(0, n - 1)``.\n - For ``k > 0`` or ``k`` omitted (``None``), ``-n <= j <= n``.\n - For ``k < 0``, ``-n - 1 <= j <= max(0, n - 1)``.\n\n - Boolean array indices are not allowed as part of a larger tuple\n index.\n\n - Integer array indices are not allowed (with the exception of 0-D\n arrays, which are treated the same as scalars).\n\n Additionally, it should be noted that indices that would return a\n scalar in NumPy will return a 0-D array. Array scalars are not allowed\n in the specification, only 0-D arrays. This is done in the\n ``Array._new`` constructor, not this function.\n\n \"\"\"\n _key = key if isinstance(key, tuple) else (key,)\n for i in _key:\n if isinstance(i, bool) or not (\n isinstance(i, SupportsIndex) # i.e. ints\n or isinstance(i, slice)\n or i == Ellipsis\n or i is None\n or isinstance(i, Array)\n or isinstance(i, np.ndarray)\n ):\n raise IndexError(\n f\"Single-axes index {i} has {type(i)=}, but only \"\n \"integers, slices (:), ellipsis (...), newaxis (None), \"\n \"zero-dimensional integer arrays and boolean arrays \"\n \"are specified in the Array API.\"\n )\n\n nonexpanding_key = []\n single_axes = []\n n_ellipsis = 0\n key_has_mask = False\n for i in _key:\n if i is not None:\n nonexpanding_key.append(i)\n if isinstance(i, Array) or isinstance(i, np.ndarray):\n if i.dtype in _boolean_dtypes:\n key_has_mask = True\n single_axes.append(i)\n else:\n # i must not be an array here, to avoid elementwise equals\n if i == Ellipsis:\n n_ellipsis += 1\n else:\n single_axes.append(i)\n\n n_single_axes = len(single_axes)\n if n_ellipsis > 1:\n return # handled by ndarray\n elif n_ellipsis == 0:\n # Note boolean masks must be the sole index, which we check for\n # later on.\n if not key_has_mask and n_single_axes < self.ndim:\n raise IndexError(\n f\"{self.ndim=}, but the multi-axes index only specifies \"\n f\"{n_single_axes} dimensions. If this was intentional, \"\n \"add a trailing ellipsis (...) 
which expands into as many \"\n \"slices (:) as necessary - this is what np.ndarray arrays \"\n \"implicitly do, but such flat indexing behaviour is not \"\n \"specified in the Array API.\"\n )\n\n if n_ellipsis == 0:\n indexed_shape = self.shape\n else:\n ellipsis_start = None\n for pos, i in enumerate(nonexpanding_key):\n if not (isinstance(i, Array) or isinstance(i, np.ndarray)):\n if i == Ellipsis:\n ellipsis_start = pos\n break\n assert ellipsis_start is not None # sanity check\n ellipsis_end = self.ndim - (n_single_axes - ellipsis_start)\n indexed_shape = (\n self.shape[:ellipsis_start] + self.shape[ellipsis_end:]\n )\n for i, side in zip(single_axes, indexed_shape):\n if isinstance(i, slice):\n if side == 0:\n f_range = \"0 (or None)\"\n else:\n f_range = f\"between -{side} and {side - 1} (or None)\"\n if i.start is not None:\n try:\n start = operator.index(i.start)\n except TypeError:\n pass # handled by ndarray\n else:\n if not (-side <= start <= side):\n raise IndexError(\n f\"Slice {i} contains {start=}, but should be \"\n f\"{f_range} for an axis of size {side} \"\n \"(out-of-bounds starts are not specified in \"\n \"the Array API)\"\n )\n if i.stop is not None:\n try:\n stop = operator.index(i.stop)\n except TypeError:\n pass # handled by ndarray\n else:\n if not (-side <= stop <= side):\n raise IndexError(\n f\"Slice {i} contains {stop=}, but should be \"\n f\"{f_range} for an axis of size {side} \"\n \"(out-of-bounds stops are not specified in \"\n \"the Array API)\"\n )\n elif isinstance(i, Array):\n if i.dtype in _boolean_dtypes and len(_key) != 1:\n assert isinstance(key, tuple) # sanity check\n raise IndexError(\n f\"Single-axes index {i} is a boolean array and \"\n f\"{len(key)=}, but masking is only specified in the \"\n \"Array API when the array is the sole index.\"\n )\n elif i.dtype in _integer_dtypes and i.ndim != 0:\n raise IndexError(\n f\"Single-axes index {i} is a non-zero-dimensional \"\n \"integer array, but advanced integer indexing is not \"\n \"specified in the Array API.\"\n )\n elif isinstance(i, tuple):\n raise IndexError(\n f\"Single-axes index {i} is a tuple, but nested tuple \"\n \"indices are not specified in the Array API.\"\n )\n\n # Everything below this line is required by the spec.\n\n def __abs__(self: Array, /) -> Array:\n \"\"\"\n Performs the operation __abs__.\n \"\"\"\n if self.dtype not in _numeric_dtypes:\n raise TypeError(\"Only numeric dtypes are allowed in __abs__\")\n res = self._array.__abs__()\n return self.__class__._new(res)\n\n def __add__(self: Array, other: Union[int, float, Array], /) -> Array:\n \"\"\"\n Performs the operation __add__.\n \"\"\"\n other = self._check_allowed_dtypes(other, \"numeric\", \"__add__\")\n if other is NotImplemented:\n return other\n self, other = self._normalize_two_args(self, other)\n res = self._array.__add__(other._array)\n return self.__class__._new(res)\n\n def __and__(self: Array, other: Union[int, bool, Array], /) -> Array:\n \"\"\"\n Performs the operation __and__.\n \"\"\"\n other = self._check_allowed_dtypes(other, \"integer or boolean\", \"__and__\")\n if other is NotImplemented:\n return other\n self, other = self._normalize_two_args(self, other)\n res = self._array.__and__(other._array)\n return self.__class__._new(res)\n\n def __array_namespace__(\n self: Array, /, *, api_version: Optional[str] = None\n ) -> types.ModuleType:\n if api_version is not None and not api_version.startswith(\"2021.\"):\n raise ValueError(f\"Unrecognized array API version: {api_version!r}\")\n return 
array_api\n\n def __bool__(self: Array, /) -> bool:\n \"\"\"\n Performs the operation __bool__.\n \"\"\"\n # Note: This is an error here.\n if self._array.ndim != 0:\n raise TypeError(\"bool is only allowed on arrays with 0 dimensions\")\n res = self._array.__bool__()\n return res\n\n def __complex__(self: Array, /) -> complex:\n \"\"\"\n Performs the operation __complex__.\n \"\"\"\n # Note: This is an error here.\n if self._array.ndim != 0:\n raise TypeError(\"complex is only allowed on arrays with 0 dimensions\")\n res = self._array.__complex__()\n return res\n\n def __dlpack__(self: Array, /, *, stream: None = None) -> PyCapsule:\n \"\"\"\n Performs the operation __dlpack__.\n \"\"\"\n return self._array.__dlpack__(stream=stream)\n\n def __dlpack_device__(self: Array, /) -> Tuple[IntEnum, int]:\n \"\"\"\n Performs the operation __dlpack_device__.\n \"\"\"\n # Note: device support is required for this\n return self._array.__dlpack_device__()\n\n def __eq__(self: Array, other: Union[int, float, bool, Array], /) -> Array:\n \"\"\"\n Performs the operation __eq__.\n \"\"\"\n # Even though \"all\" dtypes are allowed, we still require them to be\n # promotable with each other.\n other = self._check_allowed_dtypes(other, \"all\", \"__eq__\")\n if other is NotImplemented:\n return other\n self, other = self._normalize_two_args(self, other)\n res = self._array.__eq__(other._array)\n return self.__class__._new(res)\n\n def __float__(self: Array, /) -> float:\n \"\"\"\n Performs the operation __float__.\n \"\"\"\n # Note: This is an error here.\n if self._array.ndim != 0:\n raise TypeError(\"float is only allowed on arrays with 0 dimensions\")\n if self.dtype in _complex_floating_dtypes:\n raise TypeError(\"float is not allowed on complex floating-point arrays\")\n res = self._array.__float__()\n return res\n\n def __floordiv__(self: Array, other: Union[int, float, Array], /) -> Array:\n \"\"\"\n Performs the operation __floordiv__.\n \"\"\"\n other = self._check_allowed_dtypes(other, \"real numeric\", \"__floordiv__\")\n if other is NotImplemented:\n return other\n self, other = self._normalize_two_args(self, other)\n res = self._array.__floordiv__(other._array)\n return self.__class__._new(res)\n\n def __ge__(self: Array, other: Union[int, float, Array], /) -> Array:\n \"\"\"\n Performs the operation __ge__.\n \"\"\"\n other = self._check_allowed_dtypes(other, \"real numeric\", \"__ge__\")\n if other is NotImplemented:\n return other\n self, other = self._normalize_two_args(self, other)\n res = self._array.__ge__(other._array)\n return self.__class__._new(res)\n\n def __getitem__(\n self: Array,\n key: Union[\n int, slice, ellipsis, Tuple[Union[int, slice, ellipsis], ...], Array\n ],\n /,\n ) -> Array:\n \"\"\"\n Performs the operation __getitem__.\n \"\"\"\n # Note: Only indices required by the spec are allowed. 
See the\n # docstring of _validate_index\n self._validate_index(key)\n if isinstance(key, Array):\n # Indexing self._array with array_api arrays can be erroneous\n key = key._array\n res = self._array.__getitem__(key)\n return self._new(res)\n\n def __gt__(self: Array, other: Union[int, float, Array], /) -> Array:\n \"\"\"\n Performs the operation __gt__.\n \"\"\"\n other = self._check_allowed_dtypes(other, \"real numeric\", \"__gt__\")\n if other is NotImplemented:\n return other\n self, other = self._normalize_two_args(self, other)\n res = self._array.__gt__(other._array)\n return self.__class__._new(res)\n\n def __int__(self: Array, /) -> int:\n \"\"\"\n Performs the operation __int__.\n \"\"\"\n # Note: This is an error here.\n if self._array.ndim != 0:\n raise TypeError(\"int is only allowed on arrays with 0 dimensions\")\n if self.dtype in _complex_floating_dtypes:\n raise TypeError(\"int is not allowed on complex floating-point arrays\")\n res = self._array.__int__()\n return res\n\n def __index__(self: Array, /) -> int:\n \"\"\"\n Performs the operation __index__.\n \"\"\"\n res = self._array.__index__()\n return res\n\n def __invert__(self: Array, /) -> Array:\n \"\"\"\n Performs the operation __invert__.\n \"\"\"\n if self.dtype not in _integer_or_boolean_dtypes:\n raise TypeError(\"Only integer or boolean dtypes are allowed in __invert__\")\n res = self._array.__invert__()\n return self.__class__._new(res)\n\n def __le__(self: Array, other: Union[int, float, Array], /) -> Array:\n \"\"\"\n Performs the operation __le__.\n \"\"\"\n other = self._check_allowed_dtypes(other, \"real numeric\", \"__le__\")\n if other is NotImplemented:\n return other\n self, other = self._normalize_two_args(self, other)\n res = self._array.__le__(other._array)\n return self.__class__._new(res)\n\n def __lshift__(self: Array, other: Union[int, Array], /) -> Array:\n \"\"\"\n Performs the operation __lshift__.\n \"\"\"\n other = self._check_allowed_dtypes(other, \"integer\", \"__lshift__\")\n if other is NotImplemented:\n return other\n self, other = self._normalize_two_args(self, other)\n res = self._array.__lshift__(other._array)\n return self.__class__._new(res)\n\n def __lt__(self: Array, other: Union[int, float, Array], /) -> Array:\n \"\"\"\n Performs the operation __lt__.\n \"\"\"\n other = self._check_allowed_dtypes(other, \"real numeric\", \"__lt__\")\n if other is NotImplemented:\n return other\n self, other = self._normalize_two_args(self, other)\n res = self._array.__lt__(other._array)\n return self.__class__._new(res)\n\n def __matmul__(self: Array, other: Array, /) -> Array:\n \"\"\"\n Performs the operation __matmul__.\n \"\"\"\n # matmul is not defined for scalars, but without this, we may get\n # the wrong error message from asarray.\n other = self._check_allowed_dtypes(other, \"numeric\", \"__matmul__\")\n if other is NotImplemented:\n return other\n res = self._array.__matmul__(other._array)\n return self.__class__._new(res)\n\n def __mod__(self: Array, other: Union[int, float, Array], /) -> Array:\n \"\"\"\n Performs the operation __mod__.\n \"\"\"\n other = self._check_allowed_dtypes(other, \"real numeric\", \"__mod__\")\n if other is NotImplemented:\n return other\n self, other = self._normalize_two_args(self, other)\n res = self._array.__mod__(other._array)\n return self.__class__._new(res)\n\n def __mul__(self: Array, other: Union[int, float, Array], /) -> Array:\n \"\"\"\n Performs the operation __mul__.\n \"\"\"\n other = self._check_allowed_dtypes(other, \"numeric\", 
\"__mul__\")\n if other is NotImplemented:\n return other\n self, other = self._normalize_two_args(self, other)\n res = self._array.__mul__(other._array)\n return self.__class__._new(res)\n\n def __ne__(self: Array, other: Union[int, float, bool, Array], /) -> Array:\n \"\"\"\n Performs the operation __ne__.\n \"\"\"\n other = self._check_allowed_dtypes(other, \"all\", \"__ne__\")\n if other is NotImplemented:\n return other\n self, other = self._normalize_two_args(self, other)\n res = self._array.__ne__(other._array)\n return self.__class__._new(res)\n\n def __neg__(self: Array, /) -> Array:\n \"\"\"\n Performs the operation __neg__.\n \"\"\"\n if self.dtype not in _numeric_dtypes:\n raise TypeError(\"Only numeric dtypes are allowed in __neg__\")\n res = self._array.__neg__()\n return self.__class__._new(res)\n\n def __or__(self: Array, other: Union[int, bool, Array], /) -> Array:\n \"\"\"\n Performs the operation __or__.\n \"\"\"\n other = self._check_allowed_dtypes(other, \"integer or boolean\", \"__or__\")\n if other is NotImplemented:\n return other\n self, other = self._normalize_two_args(self, other)\n res = self._array.__or__(other._array)\n return self.__class__._new(res)\n\n def __pos__(self: Array, /) -> Array:\n \"\"\"\n Performs the operation __pos__.\n \"\"\"\n if self.dtype not in _numeric_dtypes:\n raise TypeError(\"Only numeric dtypes are allowed in __pos__\")\n res = self._array.__pos__()\n return self.__class__._new(res)\n\n def __pow__(self: Array, other: Union[int, float, Array], /) -> Array:\n \"\"\"\n Performs the operation __pow__.\n \"\"\"\n from ._elementwise_functions import pow\n\n other = self._check_allowed_dtypes(other, \"numeric\", \"__pow__\")\n if other is NotImplemented:\n return other\n # Note: NumPy's __pow__ does not follow type promotion rules for 0-d\n # arrays, so we use pow() here instead.\n return pow(self, other)\n\n def __rshift__(self: Array, other: Union[int, Array], /) -> Array:\n \"\"\"\n Performs the operation __rshift__.\n \"\"\"\n other = self._check_allowed_dtypes(other, \"integer\", \"__rshift__\")\n if other is NotImplemented:\n return other\n self, other = self._normalize_two_args(self, other)\n res = self._array.__rshift__(other._array)\n return self.__class__._new(res)\n\n def __setitem__(\n self,\n key: Union[\n int, slice, ellipsis, Tuple[Union[int, slice, ellipsis], ...], Array\n ],\n value: Union[int, float, bool, Array],\n /,\n ) -> None:\n \"\"\"\n Performs the operation __setitem__.\n \"\"\"\n # Note: Only indices required by the spec are allowed. 
See the\n # docstring of _validate_index\n self._validate_index(key)\n if isinstance(key, Array):\n # Indexing self._array with array_api arrays can be erroneous\n key = key._array\n self._array.__setitem__(key, asarray(value)._array)\n\n def __sub__(self: Array, other: Union[int, float, Array], /) -> Array:\n \"\"\"\n Performs the operation __sub__.\n \"\"\"\n other = self._check_allowed_dtypes(other, \"numeric\", \"__sub__\")\n if other is NotImplemented:\n return other\n self, other = self._normalize_two_args(self, other)\n res = self._array.__sub__(other._array)\n return self.__class__._new(res)\n\n # PEP 484 requires int to be a subtype of float, but __truediv__ should\n # not accept int.\n def __truediv__(self: Array, other: Union[float, Array], /) -> Array:\n \"\"\"\n Performs the operation __truediv__.\n \"\"\"\n other = self._check_allowed_dtypes(other, \"floating-point\", \"__truediv__\")\n if other is NotImplemented:\n return other\n self, other = self._normalize_two_args(self, other)\n res = self._array.__truediv__(other._array)\n return self.__class__._new(res)\n\n def __xor__(self: Array, other: Union[int, bool, Array], /) -> Array:\n \"\"\"\n Performs the operation __xor__.\n \"\"\"\n other = self._check_allowed_dtypes(other, \"integer or boolean\", \"__xor__\")\n if other is NotImplemented:\n return other\n self, other = self._normalize_two_args(self, other)\n res = self._array.__xor__(other._array)\n return self.__class__._new(res)\n\n def __iadd__(self: Array, other: Union[int, float, Array], /) -> Array:\n \"\"\"\n Performs the operation __iadd__.\n \"\"\"\n other = self._check_allowed_dtypes(other, \"numeric\", \"__iadd__\")\n if other is NotImplemented:\n return other\n self._array.__iadd__(other._array)\n return self\n\n def __radd__(self: Array, other: Union[int, float, Array], /) -> Array:\n \"\"\"\n Performs the operation __radd__.\n \"\"\"\n other = self._check_allowed_dtypes(other, \"numeric\", \"__radd__\")\n if other is NotImplemented:\n return other\n self, other = self._normalize_two_args(self, other)\n res = self._array.__radd__(other._array)\n return self.__class__._new(res)\n\n def __iand__(self: Array, other: Union[int, bool, Array], /) -> Array:\n \"\"\"\n Performs the operation __iand__.\n \"\"\"\n other = self._check_allowed_dtypes(other, \"integer or boolean\", \"__iand__\")\n if other is NotImplemented:\n return other\n self._array.__iand__(other._array)\n return self\n\n def __rand__(self: Array, other: Union[int, bool, Array], /) -> Array:\n \"\"\"\n Performs the operation __rand__.\n \"\"\"\n other = self._check_allowed_dtypes(other, \"integer or boolean\", \"__rand__\")\n if other is NotImplemented:\n return other\n self, other = self._normalize_two_args(self, other)\n res = self._array.__rand__(other._array)\n return self.__class__._new(res)\n\n def __ifloordiv__(self: Array, other: Union[int, float, Array], /) -> Array:\n \"\"\"\n Performs the operation __ifloordiv__.\n \"\"\"\n other = self._check_allowed_dtypes(other, \"real numeric\", \"__ifloordiv__\")\n if other is NotImplemented:\n return other\n self._array.__ifloordiv__(other._array)\n return self\n\n def __rfloordiv__(self: Array, other: Union[int, float, Array], /) -> Array:\n \"\"\"\n Performs the operation __rfloordiv__.\n \"\"\"\n other = self._check_allowed_dtypes(other, \"real numeric\", \"__rfloordiv__\")\n if other is NotImplemented:\n return other\n self, other = self._normalize_two_args(self, other)\n res = self._array.__rfloordiv__(other._array)\n return 
self.__class__._new(res)\n\n def __ilshift__(self: Array, other: Union[int, Array], /) -> Array:\n \"\"\"\n Performs the operation __ilshift__.\n \"\"\"\n other = self._check_allowed_dtypes(other, \"integer\", \"__ilshift__\")\n if other is NotImplemented:\n return other\n self._array.__ilshift__(other._array)\n return self\n\n def __rlshift__(self: Array, other: Union[int, Array], /) -> Array:\n \"\"\"\n Performs the operation __rlshift__.\n \"\"\"\n other = self._check_allowed_dtypes(other, \"integer\", \"__rlshift__\")\n if other is NotImplemented:\n return other\n self, other = self._normalize_two_args(self, other)\n res = self._array.__rlshift__(other._array)\n return self.__class__._new(res)\n\n def __imatmul__(self: Array, other: Array, /) -> Array:\n \"\"\"\n Performs the operation __imatmul__.\n \"\"\"\n # matmul is not defined for scalars, but without this, we may get\n # the wrong error message from asarray.\n other = self._check_allowed_dtypes(other, \"numeric\", \"__imatmul__\")\n if other is NotImplemented:\n return other\n res = self._array.__imatmul__(other._array)\n return self.__class__._new(res)\n\n def __rmatmul__(self: Array, other: Array, /) -> Array:\n \"\"\"\n Performs the operation __rmatmul__.\n \"\"\"\n # matmul is not defined for scalars, but without this, we may get\n # the wrong error message from asarray.\n other = self._check_allowed_dtypes(other, \"numeric\", \"__rmatmul__\")\n if other is NotImplemented:\n return other\n res = self._array.__rmatmul__(other._array)\n return self.__class__._new(res)\n\n def __imod__(self: Array, other: Union[int, float, Array], /) -> Array:\n \"\"\"\n Performs the operation __imod__.\n \"\"\"\n other = self._check_allowed_dtypes(other, \"real numeric\", \"__imod__\")\n if other is NotImplemented:\n return other\n self._array.__imod__(other._array)\n return self\n\n def __rmod__(self: Array, other: Union[int, float, Array], /) -> Array:\n \"\"\"\n Performs the operation __rmod__.\n \"\"\"\n other = self._check_allowed_dtypes(other, \"real numeric\", \"__rmod__\")\n if other is NotImplemented:\n return other\n self, other = self._normalize_two_args(self, other)\n res = self._array.__rmod__(other._array)\n return self.__class__._new(res)\n\n def __imul__(self: Array, other: Union[int, float, Array], /) -> Array:\n \"\"\"\n Performs the operation __imul__.\n \"\"\"\n other = self._check_allowed_dtypes(other, \"numeric\", \"__imul__\")\n if other is NotImplemented:\n return other\n self._array.__imul__(other._array)\n return self\n\n def __rmul__(self: Array, other: Union[int, float, Array], /) -> Array:\n \"\"\"\n Performs the operation __rmul__.\n \"\"\"\n other = self._check_allowed_dtypes(other, \"numeric\", \"__rmul__\")\n if other is NotImplemented:\n return other\n self, other = self._normalize_two_args(self, other)\n res = self._array.__rmul__(other._array)\n return self.__class__._new(res)\n\n def __ior__(self: Array, other: Union[int, bool, Array], /) -> Array:\n \"\"\"\n Performs the operation __ior__.\n \"\"\"\n other = self._check_allowed_dtypes(other, \"integer or boolean\", \"__ior__\")\n if other is NotImplemented:\n return other\n self._array.__ior__(other._array)\n return self\n\n def __ror__(self: Array, other: Union[int, bool, Array], /) -> Array:\n \"\"\"\n Performs the operation __ror__.\n \"\"\"\n other = self._check_allowed_dtypes(other, \"integer or boolean\", \"__ror__\")\n if other is NotImplemented:\n return other\n self, other = self._normalize_two_args(self, other)\n res = 
self._array.__ror__(other._array)\n return self.__class__._new(res)\n\n def __ipow__(self: Array, other: Union[int, float, Array], /) -> Array:\n \"\"\"\n Performs the operation __ipow__.\n \"\"\"\n other = self._check_allowed_dtypes(other, \"numeric\", \"__ipow__\")\n if other is NotImplemented:\n return other\n self._array.__ipow__(other._array)\n return self\n\n def __rpow__(self: Array, other: Union[int, float, Array], /) -> Array:\n \"\"\"\n Performs the operation __rpow__.\n \"\"\"\n from ._elementwise_functions import pow\n\n other = self._check_allowed_dtypes(other, \"numeric\", \"__rpow__\")\n if other is NotImplemented:\n return other\n # Note: NumPy's __pow__ does not follow the spec type promotion rules\n # for 0-d arrays, so we use pow() here instead.\n return pow(other, self)\n\n def __irshift__(self: Array, other: Union[int, Array], /) -> Array:\n \"\"\"\n Performs the operation __irshift__.\n \"\"\"\n other = self._check_allowed_dtypes(other, \"integer\", \"__irshift__\")\n if other is NotImplemented:\n return other\n self._array.__irshift__(other._array)\n return self\n\n def __rrshift__(self: Array, other: Union[int, Array], /) -> Array:\n \"\"\"\n Performs the operation __rrshift__.\n \"\"\"\n other = self._check_allowed_dtypes(other, \"integer\", \"__rrshift__\")\n if other is NotImplemented:\n return other\n self, other = self._normalize_two_args(self, other)\n res = self._array.__rrshift__(other._array)\n return self.__class__._new(res)\n\n def __isub__(self: Array, other: Union[int, float, Array], /) -> Array:\n \"\"\"\n Performs the operation __isub__.\n \"\"\"\n other = self._check_allowed_dtypes(other, \"numeric\", \"__isub__\")\n if other is NotImplemented:\n return other\n self._array.__isub__(other._array)\n return self\n\n def __rsub__(self: Array, other: Union[int, float, Array], /) -> Array:\n \"\"\"\n Performs the operation __rsub__.\n \"\"\"\n other = self._check_allowed_dtypes(other, \"numeric\", \"__rsub__\")\n if other is NotImplemented:\n return other\n self, other = self._normalize_two_args(self, other)\n res = self._array.__rsub__(other._array)\n return self.__class__._new(res)\n\n def __itruediv__(self: Array, other: Union[float, Array], /) -> Array:\n \"\"\"\n Performs the operation __itruediv__.\n \"\"\"\n other = self._check_allowed_dtypes(other, \"floating-point\", \"__itruediv__\")\n if other is NotImplemented:\n return other\n self._array.__itruediv__(other._array)\n return self\n\n def __rtruediv__(self: Array, other: Union[float, Array], /) -> Array:\n \"\"\"\n Performs the operation __rtruediv__.\n \"\"\"\n other = self._check_allowed_dtypes(other, \"floating-point\", \"__rtruediv__\")\n if other is NotImplemented:\n return other\n self, other = self._normalize_two_args(self, other)\n res = self._array.__rtruediv__(other._array)\n return self.__class__._new(res)\n\n def __ixor__(self: Array, other: Union[int, bool, Array], /) -> Array:\n \"\"\"\n Performs the operation __ixor__.\n \"\"\"\n other = self._check_allowed_dtypes(other, \"integer or boolean\", \"__ixor__\")\n if other is NotImplemented:\n return other\n self._array.__ixor__(other._array)\n return self\n\n def __rxor__(self: Array, other: Union[int, bool, Array], /) -> Array:\n \"\"\"\n Performs the operation __rxor__.\n \"\"\"\n other = self._check_allowed_dtypes(other, \"integer or boolean\", \"__rxor__\")\n if other is NotImplemented:\n return other\n self, other = self._normalize_two_args(self, other)\n res = self._array.__rxor__(other._array)\n return 
self.__class__._new(res)\n\n def to_device(self: Array, device: Device, /, stream: None = None) -> Array:\n if stream is not None:\n raise ValueError(\"The stream argument to to_device() is not supported\")\n if device == 'cpu':\n return self\n raise ValueError(f\"Unsupported device {device!r}\")\n\n @property\n def dtype(self) -> Dtype:\n \"\"\"\n Array API compatible wrapper for :py:meth:`np.ndarray.dtype <numpy.ndarray.dtype>`.\n\n See its docstring for more information.\n \"\"\"\n return self._array.dtype\n\n @property\n def device(self) -> Device:\n return \"cpu\"\n\n # Note: mT is new in array API spec (see matrix_transpose)\n @property\n def mT(self) -> Array:\n from .linalg import matrix_transpose\n return matrix_transpose(self)\n\n @property\n def ndim(self) -> int:\n \"\"\"\n Array API compatible wrapper for :py:meth:`np.ndarray.ndim <numpy.ndarray.ndim>`.\n\n See its docstring for more information.\n \"\"\"\n return self._array.ndim\n\n @property\n def shape(self) -> Tuple[int, ...]:\n \"\"\"\n Array API compatible wrapper for :py:meth:`np.ndarray.shape <numpy.ndarray.shape>`.\n\n See its docstring for more information.\n \"\"\"\n return self._array.shape\n\n @property\n def size(self) -> int:\n \"\"\"\n Array API compatible wrapper for :py:meth:`np.ndarray.size <numpy.ndarray.size>`.\n\n See its docstring for more information.\n \"\"\"\n return self._array.size\n\n @property\n def T(self) -> Array:\n \"\"\"\n Array API compatible wrapper for :py:meth:`np.ndarray.T <numpy.ndarray.T>`.\n\n See its docstring for more information.\n \"\"\"\n # Note: T only works on 2-dimensional arrays. See the corresponding\n # note in the specification:\n # https://data-apis.org/array-api/latest/API_specification/array_object.html#t\n if self.ndim != 2:\n raise ValueError(\"x.T requires x to have 2 dimensions. Use x.mT to transpose stacks of matrices and permute_dims() to permute dimensions.\")\n return self.__class__._new(self._array.T)" }, { "identifier": "normalize_axis_tuple", "path": "backend/venv/lib/python3.10/site-packages/numpy/core/numeric.py", "snippet": "def normalize_axis_tuple(axis, ndim, argname=None, allow_duplicate=False):\n \"\"\"\n Normalizes an axis argument into a tuple of non-negative integer axes.\n\n This handles shorthands such as ``1`` and converts them to ``(1,)``,\n as well as performing the handling of negative indices covered by\n `normalize_axis_index`.\n\n By default, this forbids axes from being specified multiple times.\n\n Used internally by multi-axis-checking logic.\n\n .. 
versionadded:: 1.13.0\n\n Parameters\n ----------\n axis : int, iterable of int\n The un-normalized index or indices of the axis.\n ndim : int\n The number of dimensions of the array that `axis` should be normalized\n against.\n argname : str, optional\n A prefix to put before the error message, typically the name of the\n argument.\n allow_duplicate : bool, optional\n If False, the default, disallow an axis from being specified twice.\n\n Returns\n -------\n normalized_axes : tuple of int\n The normalized axis index, such that `0 <= normalized_axis < ndim`\n\n Raises\n ------\n AxisError\n If any axis provided is out of range\n ValueError\n If an axis is repeated\n\n See also\n --------\n normalize_axis_index : normalizing a single scalar axis\n \"\"\"\n # Optimization to speed-up the most common cases.\n if type(axis) not in (tuple, list):\n try:\n axis = [operator.index(axis)]\n except TypeError:\n pass\n # Going via an iterator directly is slower than via list comprehension.\n axis = tuple([normalize_axis_index(ax, ndim, argname) for ax in axis])\n if not allow_duplicate and len(set(axis)) != len(axis):\n if argname:\n raise ValueError('repeated axis in `{}` argument'.format(argname))\n else:\n raise ValueError('repeated axis')\n return axis" } ]
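The second context snippet above ends with `normalize_axis_tuple`, which converts axis shorthands into canonical tuples of non-negative indices. A minimal usage sketch of the behaviour documented in that docstring (note that `numpy.core.numeric` is a private import path; it is used here only because the record itself imports from it, and the exact outputs follow from the docstring above):

from numpy.core.numeric import normalize_axis_tuple

print(normalize_axis_tuple(-1, 3))        # (2,)  scalar shorthand, negative index resolved
print(normalize_axis_tuple((0, -1), 3))   # (0, 2)
try:
    normalize_axis_tuple((0, 0), 3, argname="axes")
except ValueError as err:
    print(err)                            # repeated axis in `axes` argument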
from ._dtypes import (
    _floating_dtypes,
    _numeric_dtypes,
    float32,
    float64,
    complex64,
    complex128
)
from ._manipulation_functions import reshape
from ._array_object import Array
from ..core.numeric import normalize_axis_tuple
from typing import TYPE_CHECKING
from ._typing import Literal, Optional, Sequence, Tuple, Union, Dtype
from typing import NamedTuple
from ..linalg.linalg import (_makearray, _assert_stacked_2d, _assert_stacked_square,
                             _commonType, isComplexType, get_linalg_error_extobj,
                             _raise_linalgerror_singular)
from ..linalg import _umath_linalg
import numpy.linalg
import numpy as np
12,080
from __future__ import annotations

if TYPE_CHECKING:

class EighResult(NamedTuple):
    eigenvalues: Array
    eigenvectors: Array

class QRResult(NamedTuple):
    Q: Array
    R: Array

class SlogdetResult(NamedTuple):
    sign: Array
    logabsdet: Array

class SVDResult(NamedTuple):
    U: Array
    S: Array
    Vh: Array

# Note: the inclusion of the upper keyword is different from
# np.linalg.cholesky, which does not have it.
def cholesky(x: Array, /, *, upper: bool = False) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.linalg.cholesky <numpy.linalg.cholesky>`.

    See its docstring for more information.
    """
    # Note: the restriction to floating-point dtypes only is different from
    # np.linalg.cholesky.
    if x.dtype not in _floating_dtypes:
        raise TypeError('Only floating-point dtypes are allowed in cholesky')
    L = np.linalg.cholesky(x._array)
    if upper:
        return Array._new(L).mT
    return Array._new(L)

# Note: cross is the numpy top-level namespace, not np.linalg
def cross(x1: Array, x2: Array, /, *, axis: int = -1) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.cross <numpy.cross>`.

    See its docstring for more information.
    """
from __future__ import annotations

if TYPE_CHECKING:

class EighResult(NamedTuple):
    eigenvalues: Array
    eigenvectors: Array

class QRResult(NamedTuple):
    Q: Array
    R: Array

class SlogdetResult(NamedTuple):
    sign: Array
    logabsdet: Array

class SVDResult(NamedTuple):
    U: Array
    S: Array
    Vh: Array

# Note: the inclusion of the upper keyword is different from
# np.linalg.cholesky, which does not have it.
def cholesky(x: Array, /, *, upper: bool = False) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.linalg.cholesky <numpy.linalg.cholesky>`.

    See its docstring for more information.
    """
    # Note: the restriction to floating-point dtypes only is different from
    # np.linalg.cholesky.
    if x.dtype not in _floating_dtypes:
        raise TypeError('Only floating-point dtypes are allowed in cholesky')
    L = np.linalg.cholesky(x._array)
    if upper:
        return Array._new(L).mT
    return Array._new(L)

# Note: cross is the numpy top-level namespace, not np.linalg
def cross(x1: Array, x2: Array, /, *, axis: int = -1) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.cross <numpy.cross>`.

    See its docstring for more information.
    """
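The cropped code above shows that this array_api wrapper adds an `upper` keyword that plain `np.linalg.cholesky` lacks, implemented by transposing the lower factor with `.mT`. A hedged usage sketch, assuming the experimental `numpy.array_api` namespace that ships this module (it warns on import and was later removed from NumPy):

import numpy.array_api as xp  # experimental namespace; emits a warning on import

x = xp.asarray([[4.0, 2.0], [2.0, 3.0]])
L = xp.linalg.cholesky(x)              # lower-triangular factor: x = L @ L.mT
U = xp.linalg.cholesky(x, upper=True)  # upper factor, i.e. L.mT: x = U.mT @ U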
if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
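For orientation, this `next_line` is the first statement of the `cross` body where the cropped code breaks off: a dtype guard in the same style as the other wrappers in this record. A plausible continuation, purely as a hedged sketch (the error message and the remainder of the body are assumptions modelled on the cholesky wrapper above, not the file's actual text):

def cross(x1: Array, x2: Array, /, *, axis: int = -1) -> Array:
    # ground-truth next line from this record:
    if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
        # assumed message, following the pattern of the other dtype guards
        raise TypeError('Only numeric dtypes are allowed in cross')
    # assumed remainder: delegate to the top-level numpy function and rewrap
    return Array._new(np.cross(x1._array, x2._array, axis=axis))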
0
2023-10-23 18:09:28+00:00
16k
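Read together, the fields of this record form a next-line completion task: the cross-file context snippets plus the in-file imports and cropped code serve as the prompt, and `next_line` is the target (with `gold_snippet_index` presumably pointing at the context snippet that supplies the needed identifier). A minimal sketch of assembling such a prompt; the field names follow this dump, but the prompt layout and scoring are assumptions:

def build_prompt(record: dict) -> str:
    # Concatenate the retrieved cross-file snippets, the file's import block,
    # and the cropped in-file code; a model is asked to predict `next_line`.
    context = "\n\n".join(item["snippet"] for item in record["context"])
    return "\n\n".join([context, record["import_statement"], record["cropped_code"]])

# exact-match scoring against the gold continuation:
# correct = prediction.strip() == record["next_line"].strip()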
zju3dv/nr_in_a_room
test/test_light_adaptation.py
[ { "identifier": "RoomOptimizer", "path": "optim/room_optimizer.py", "snippet": "class RoomOptimizer:\n def __init__(\n self,\n scale_factor: float,\n bg_scale_factor: float,\n bg_scene_center: list,\n img_wh: list,\n near: float,\n far: float,\n chunk: int,\n model_ckpt_path_dict: Dict[str, Any],\n config=None,\n scale_factor_dict: Dict[str, Any] = {},\n scene_info_path: str = None,\n scene_info_json_path: str = None,\n model_type=\"NeuS\",\n N_samples: int = 64,\n N_importance: int = 128,\n relation_info: Dict[str, Any] = {},\n output_path: str = None,\n prefix: str = \"\",\n active_instance_id: list = [46, 4, 9, 102],\n virtual_instance_id: list = [], # specific for edit (insert virtual to real) mode\n filter_door_and_window: bool = True,\n lr: float = 1e-2,\n N_optim_step: int = 500,\n adjust_lr_per_step: int = 150,\n optim_batch_size: int = 1024,\n use_amp: bool = False,\n extract_obj_bbox_from_neural_model: bool = False,\n ig_data_base_dir: str = \"data/ig_dataset_v1.0.1/\",\n mask_per_object: bool = False,\n bbox_ray_intersect: bool = True,\n bbox_enlarge: float = 0.1,\n optimize_light_env: bool = True,\n optimize_appearance_code: bool = False,\n use_light_from_image_attr: bool = False,\n use_appearance_from_image_attr: bool = False,\n optimize_option: list = [\n \"photometric_loss\",\n \"perceptual_loss\",\n \"z_axis_align_loss\",\n \"object_room_wall_attach\",\n \"object_room_floor_attach\",\n \"physical_violation\",\n \"object_object_attach\",\n ],\n ):\n # load config\n self.scene_info_path = scene_info_path\n self.scale_factor = scale_factor\n self.scale_factor_dict = scale_factor_dict\n self.bg_scale_factor = bg_scale_factor\n self.bg_scene_center = np.array(bg_scene_center)\n self.ig_data_base_dir = ig_data_base_dir\n self.mask_per_object = mask_per_object\n self.bbox_ray_intersect = bbox_ray_intersect\n self.bbox_enlarge = bbox_enlarge\n self.virtual_instance_id = virtual_instance_id\n\n self.img_wh = img_wh\n self.w = img_wh[0]\n self.h = img_wh[1]\n self.near = near\n self.far = far\n self.N_importance = N_importance\n self.N_samples = N_samples\n self.chunk = chunk\n self.lr = lr\n self.N_optim_step = N_optim_step\n self.adjust_lr_per_step = adjust_lr_per_step\n self.optim_batch_size = optim_batch_size\n self.use_amp = use_amp\n self.optimize_light_env = optimize_light_env\n self.optimize_appearance_code = optimize_appearance_code\n self.optimize_option = optimize_option\n self.config = config\n\n self.use_light_from_image_attr = use_light_from_image_attr\n if self.use_light_from_image_attr:\n print(\n \"WARNING: self.use_light_from_image_attr = True, using hard coded light env.\"\n )\n self.hard_coded_light_id = 0 # just for compatibility\n # self.hard_coded_light_id = 9 # probe_03 in 10 HDR multi_light training\n\n self.use_appearance_from_image_attr = use_appearance_from_image_attr\n if self.use_appearance_from_image_attr:\n print(\n \"WARNING: self.use_appearance_from_image_attr = True, using first frame appearance code.\"\n )\n self.hard_coded_appearance_frame_id = 0\n\n self.optimize_exposure = \"optimize_exposure\" in self.optimize_option\n\n # laod scene info\n if scene_info_json_path is None:\n scene_info_json_path = os.path.join(scene_info_path, \"data.json\")\n self.scene_meta = read_json(scene_info_json_path)\n\n self.active_instance_id = active_instance_id\n if filter_door_and_window:\n self.filter_door_and_window()\n\n self.relation_info = relation_info\n\n self.model_type = model_type\n # self.load_model(\n # model_type, model_ckpt_path_dict[\"obj\"], 
model_ckpt_path_dict[\"bg\"]\n # )\n self.load_model_from_dict_path(model_type, model_ckpt_path_dict)\n\n self.reset_optimizable_parameters()\n\n if extract_obj_bbox_from_neural_model:\n self.extract_bounding_boxes_from_neural_model()\n\n if self.bbox_ray_intersect:\n self.prepare_bbox_ray_helper()\n\n self.set_output_path(output_path, prefix)\n\n print(\"RoomOptimizer initialize finished.\")\n\n def load_model_from_dict_path(self, model_type, model_ckpt_path_dict):\n assert model_type == \"NeuS\"\n self.models = {}\n self.image_attrs = {}\n\n # avoid duplicate loading\n self.models_cache = {}\n self.image_attrs_cache = {}\n\n print(\"loading model with instance_id\", self.active_instance_id)\n\n # print(model_ckpt_path_dict)\n for obj_id in self.active_instance_id:\n # identify ckpt_path\n if str(obj_id) in model_ckpt_path_dict:\n ckpt_info = model_ckpt_path_dict[str(obj_id)]\n elif obj_id == 0:\n assert (\n \"bg\" in model_ckpt_path_dict or \"0\" in model_ckpt_path_dict\n ), \"model_ckpt_path_dict missing background 'bg' or '0' ckpt\"\n ckpt_info = model_ckpt_path_dict.get(\"bg\", model_ckpt_path_dict[\"0\"])\n else:\n print(\n f\"Cannot find specific model for obj_id = {obj_id}, \\\n maybe config file is not compatible with given active_instance_id.\"\n )\n ckpt_info = model_ckpt_path_dict[\"obj\"]\n # load with cache\n ckpt_path, neus_conf = ckpt_info[\"path\"], ckpt_info[\"neus_conf\"]\n if ckpt_info not in self.models_cache:\n (\n self.models_cache[ckpt_path],\n self.image_attrs_cache[ckpt_path],\n ) = self.load_model_neus(ckpt_path, obj_id, neus_conf)\n self.models[f\"neus_{obj_id}\"] = self.models_cache[ckpt_path]\n self.image_attrs[str(obj_id)] = self.image_attrs_cache[ckpt_path]\n\n def load_model_nerf(self, ckpt_path):\n # TODO(ybbbbt): fix hard coding\n conf = {\n \"N_max_objs\": 128,\n \"N_obj_embedding\": 64,\n }\n nerf_coarse = NeRF_Object(conf)\n nerf_fine = NeRF_Object(conf)\n image_attributes = ImageAttributes(conf)\n load_ckpt(nerf_coarse, ckpt_path, model_name=\"nerf_coarse\")\n load_ckpt(nerf_fine, ckpt_path, model_name=\"nerf_fine\")\n load_ckpt(image_attributes, ckpt_path, model_name=\"image_attributes\")\n\n nerf_coarse = nerf_coarse.cuda().eval()\n nerf_fine = nerf_fine.cuda().eval()\n image_attributes = image_attributes.cuda().eval()\n\n models = {\n \"coarse\": nerf_coarse,\n \"fine\": nerf_fine,\n }\n\n embedding_xyz = Embedding(3, 10)\n embedding_dir = Embedding(3, 4)\n embeddings = {\n \"xyz\": embedding_xyz,\n \"dir\": embedding_dir,\n }\n return models, embeddings, image_attributes\n\n def load_model_neus(self, ckpt_path, obj_id, config_path=\"config/neus.yaml\"):\n conf = {\n \"model\": {\n \"N_max_objs\": 128,\n \"N_obj_embedding\": 64,\n },\n }\n if self.optimize_light_env:\n # conf[\"model\"].update({\"N_max_lights\": 128, \"N_light_embedding\": 16})\n conf[\"model\"].update({\"N_max_lights\": 1024, \"N_light_embedding\": 16})\n\n if self.optimize_appearance_code and obj_id not in self.virtual_instance_id:\n conf[\"model\"].update(\n {\"N_max_appearance_frames\": 10000, \"N_appearance_embedding\": 16}\n )\n\n neus, render_kwargs_train, render_kwargs_test = get_model_neus(\n config_path=config_path, need_trainer=False, extra_conf=conf\n )\n self.render_kwargs_neus = render_kwargs_test\n image_attributes = ImageAttributes(conf[\"model\"])\n\n print(ckpt_path)\n load_ckpt(neus, ckpt_path, model_name=\"neus\")\n load_ckpt(image_attributes, ckpt_path, model_name=\"image_attributes\")\n\n if self.config is not None and (\n str(obj_id) in 
self.config.get(\"map_virtual_to_local\", {})\n ):\n # image_attributes.embedding_instance\n real_id_in_ckpt = self.config.map_virtual_to_local[str(obj_id)]\n image_attributes.embedding_instance.weight.requires_grad = False\n image_attributes.embedding_instance.weight[\n obj_id\n ] = image_attributes.embedding_instance.weight[real_id_in_ckpt]\n # ipdb.set_trace()\n\n neus.cuda().eval()\n image_attributes.cuda().eval()\n return neus, image_attributes\n\n def reset_optimizable_parameters(self):\n self.params = []\n self.relation_info = {}\n if self.optimize_light_env:\n self.initialize_light_code()\n\n if self.optimize_appearance_code:\n self.initialize_appearance_code()\n\n if self.optimize_exposure:\n self.initialize_autoexposure()\n\n def save_optimizable_parameters(self, path):\n all_param_dict = {}\n # all_param_dict[\"params\"] = self.params\n all_param_dict[\"relation_info\"] = self.relation_info\n all_param_dict[\"object_pose_dict\"] = copy.deepcopy(self.object_pose_dict)\n all_param_dict[\"active_instance_id\"] = copy.deepcopy(self.active_instance_id)\n if self.optimize_light_env:\n all_param_dict[\"light_code\"] = copy.deepcopy(self.light_code_dict)\n if self.optimize_appearance_code:\n all_param_dict[\"appearance_code\"] = copy.deepcopy(self.appearance_code_dict)\n if self.optimize_exposure:\n all_param_dict[\"exposure\"] = copy.deepcopy(self.autoexposure_param)\n torch.save(all_param_dict, path)\n\n def load_optimizable_parameters(self, path):\n all_param_dict = torch.load(path)\n # self.params = all_param_dict[\"params\"]\n self.relation_info = all_param_dict[\"relation_info\"]\n if len(self.virtual_instance_id) == 0: # not overwrite in edit mode\n self.active_instance_id = all_param_dict[\"active_instance_id\"]\n\n def to_gpu(code_dict):\n for k, v in code_dict.items():\n if isinstance(v, torch.Tensor):\n code_dict[k] = v.cuda()\n elif isinstance(v, dict):\n for k2, v2 in v.items():\n if isinstance(v2, torch.Tensor):\n code_dict[k][k2] = v2.cuda()\n\n if len(self.virtual_instance_id) == 0: # not modify edit mode pose\n if hasattr(self, \"object_pose_dict\"):\n self.object_pose_dict.update(all_param_dict[\"object_pose_dict\"])\n else:\n self.object_pose_dict = all_param_dict[\"object_pose_dict\"]\n if self.optimize_light_env:\n self.light_code_dict = all_param_dict[\"light_code\"]\n to_gpu(self.light_code_dict)\n if self.optimize_appearance_code:\n self.appearance_code_dict = all_param_dict[\"appearance_code\"]\n to_gpu(self.appearance_code_dict)\n if self.optimize_exposure and \"exposure\" in all_param_dict:\n self.autoexposure_param = all_param_dict[\"exposure\"]\n to_gpu(self.autoexposure_param)\n # ipdb.set_trace()\n\n def interpolate_light_env_from_states(self, path1, path2, interp):\n all_param_dict_1 = torch.load(path1)\n all_param_dict_2 = torch.load(path2)\n\n # self.params = all_param_dict[\"params\"]\n def to_gpu(code_dict):\n for k, v in code_dict.items():\n if isinstance(v, torch.Tensor):\n code_dict[k] = v.cuda()\n elif isinstance(v, dict):\n for k2, v2 in v.items():\n if isinstance(v2, torch.Tensor):\n code_dict[k][k2] = v2.cuda()\n\n if self.optimize_light_env:\n light_code_dict_1 = all_param_dict_1[\"light_code\"]\n light_code_dict_2 = all_param_dict_2[\"light_code\"]\n for k, v in self.light_code_dict.items():\n self.light_code_dict[k] = light_code_dict_1[\n k\n ] * interp + light_code_dict_2[k] * (1 - interp)\n to_gpu(self.light_code_dict)\n if self.optimize_appearance_code:\n appearance_code_dict_1 = all_param_dict_1[\"appearance_code\"]\n 
appearance_code_dict_2 = all_param_dict_2[\"appearance_code\"]\n for k, v in self.appearance_code_dict.items():\n self.appearance_code_dict[k] = appearance_code_dict_1[\n k\n ] * interp + appearance_code_dict_2[k] * (1 - interp)\n to_gpu(self.appearance_code_dict)\n if self.optimize_exposure:\n autoexposure_param_1 = all_param_dict_1[\"exposure\"]\n autoexposure_param_2 = all_param_dict_2[\"exposure\"]\n for k, v in self.autoexposure_param.items():\n self.autoexposure_param[k] = autoexposure_param_1[\n k\n ] * interp + autoexposure_param_2[k] * (1 - interp)\n to_gpu(self.autoexposure_param)\n\n def reset_active_instance_id(self, active_instance_id, filter_door_and_window=True):\n self.active_instance_id = active_instance_id\n if filter_door_and_window:\n self.filter_door_and_window()\n\n def set_output_path(self, output_path: str, prefix: str, with_timestamp=True):\n if output_path is not None:\n if with_timestamp:\n self.output_path = os.path.join(\n output_path, f\"rendered_{get_timestamp()}_{prefix}\"\n )\n else:\n self.output_path = os.path.join(output_path, f\"{prefix}\")\n os.makedirs(self.output_path, exist_ok=True)\n\n def filter_door_and_window(self):\n print(\"Filtering door and window objects.\")\n filtered_active_instance_id = []\n for obj_id in self.active_instance_id:\n if self.get_type_of_instance(obj_id) not in [\"door\", \"window\"]:\n filtered_active_instance_id += [obj_id]\n self.active_instance_id = filtered_active_instance_id\n\n def initialize_light_code(self):\n self.light_code_dict = {}\n for obj_id in self.active_instance_id:\n # light_code = torch.randn((16)).cuda()\n light_code = torch.zeros((16)).cuda()\n light_code.requires_grad = True\n self.params += [\n {\"params\": light_code, \"lr\": self.lr}\n ] # light code can be optimized with larger lr\n self.light_code_dict[str(obj_id)] = light_code\n\n def initialize_appearance_code(self):\n self.appearance_code_dict = {}\n for obj_id in self.active_instance_id:\n # appearance_code = torch.randn((16)).cuda()\n appearance_code = torch.zeros((16)).cuda()\n appearance_code.requires_grad = True\n self.params += [\n {\"params\": appearance_code, \"lr\": self.lr}\n ] # light code can be optimized with larger lr\n self.appearance_code_dict[str(obj_id)] = appearance_code\n\n def initialize_autoexposure(self):\n self.autoexposure_param = {}\n for obj_id in self.active_instance_id:\n # scale and shift\n autoexposure_param = torch.Tensor([1, 1, 1, 0, 0, 0]).cuda()\n autoexposure_param.requires_grad = True\n self.params += [\n {\"params\": autoexposure_param, \"lr\": self.lr * 0.1}\n ] # light code can be optimized with larger lr\n self.autoexposure_param[str(obj_id)] = autoexposure_param\n\n def get_scale_factor(self, obj_id):\n if obj_id == 0:\n return self.bg_scale_factor\n elif str(obj_id) in self.scale_factor_dict:\n return self.scale_factor_dict[str(obj_id)]\n else:\n return self.scale_factor\n\n def extract_bounding_boxes_from_neural_model(self):\n print(\"Extracting object bounding boxes from neural model...\")\n assert self.model_type == \"NeuS\"\n for obj_id in tqdm(self.active_instance_id):\n mesh = extract_mesh_from_neus(\n self.models[f\"neus_{obj_id}\"],\n self.image_attrs[str(obj_id)],\n obj_id,\n )\n bbox = mesh.get_axis_aligned_bounding_box()\n bound = np.array([bbox.min_bound, bbox.max_bound])\n size = (bound[1] - bound[0]) * self.get_scale_factor(obj_id)\n # update scene_meta\n for idx, obj_info in enumerate(self.scene_meta[\"objs\"]):\n if obj_info[\"id\"] == obj_id:\n 
self.scene_meta[\"objs\"][idx][\"bdb3d\"][\"size\"] = size.tolist()\n\n def prepare_bbox_ray_helper(self):\n # bbox ray helper dict\n self.bbox_ray_helper_dict = {}\n for obj_id in self.active_instance_id:\n if obj_id == 0:\n continue\n obj_meta_info = get_object_meta_info(\n self.ig_data_base_dir, self.scene_meta, obj_id\n )\n length = np.array(obj_meta_info[\"bbox3d\"][\"size\"])\n self.bbox_ray_helper_dict[str(obj_id)] = BBoxRayHelper(np.zeros(3), length)\n\n def generate_object_rays(\n self, rays_o_obj, rays_d_obj, obj_id, near=None, far=None, select_ind=None\n ):\n \"\"\"\n Generate object rays given rays_o, rays_d and obj_id\n Input:\n select_ind: only for masked rendering\n \"\"\"\n if obj_id == 0: # background\n return self.generate_bg_rays(rays_o_obj, rays_d_obj, near=near, far=far)\n if self.bbox_ray_intersect:\n # for object, rays_o and rays_d should lie in world scale (unscaled)\n bbox_mask, bbox_batch_near, bbox_batch_far = self.bbox_ray_helper_dict[\n str(obj_id)\n ].get_ray_bbox_intersections(\n rays_o_obj,\n rays_d_obj,\n self.get_scale_factor(obj_id),\n # bbox_enlarge=self.bbox_enlarge / self.get_scale_factor(obj_id),\n bbox_enlarge=self.bbox_enlarge, # in physical world\n )\n # for area which hits bbox, we use bbox hit near far\n # bbox_ray_helper has scale for us, do no need to rescale\n batch_near_obj, batch_far_obj = bbox_batch_near, bbox_batch_far\n rays_o_obj = rays_o_obj / self.get_scale_factor(obj_id)\n # for the invalid part, we use 0 as near far, which assume that (0, 0, 0) is empty\n batch_near_obj[~bbox_mask] = torch.zeros_like(batch_near_obj[~bbox_mask])\n batch_far_obj[~bbox_mask] = torch.zeros_like(batch_far_obj[~bbox_mask])\n else:\n near = self.near if near is None else near\n far = self.far if far is None else far\n batch_near_obj = (\n near\n / self.get_scale_factor(obj_id)\n * torch.ones_like(rays_o_obj[:, :1])\n )\n batch_far_obj = (\n far / self.get_scale_factor(obj_id) * torch.ones_like(rays_d_obj[:, :1])\n )\n rays_o_obj = rays_o_obj / self.get_scale_factor(obj_id)\n\n if self.mask_per_object:\n # mask out of bound rendering\n obj_mask = torch.from_numpy(self.instance_mask == obj_id).view(-1)\n obj_mask = obj_mask[select_ind]\n batch_near_obj[~obj_mask] = 0\n batch_far_obj[~obj_mask] = 0\n\n rays_obj = torch.cat(\n [rays_o_obj, rays_d_obj, batch_near_obj, batch_far_obj], 1\n ) # (H*W, 8)\n rays_obj = rays_obj.cuda()\n return rays_obj\n\n def generate_bg_rays(self, rays_o_bg, rays_d_bg, near=None, far=None):\n near = self.near if near is None else near\n far = self.far if far is None else far\n batch_near_bg = near / self.bg_scale_factor * torch.ones_like(rays_o_bg[:, :1])\n batch_far_bg = far / self.bg_scale_factor * torch.ones_like(rays_d_bg[:, :1])\n rays_o_bg = rays_o_bg / self.bg_scale_factor\n rays_bg = torch.cat(\n [rays_o_bg, rays_d_bg, batch_near_bg, batch_far_bg], 1\n ) # (H*W, 8)\n rays_bg = rays_bg.cuda()\n return rays_bg\n\n def batched_inference_multi(\n self,\n rays_list,\n obj_id_list,\n to_cpu=True,\n hit_test_only=False,\n need_normal=False,\n use_sphere_tracing=True,\n safe_region_volume_rendering=True,\n refine_edge=False,\n refine_edge_obj_ids=[],\n render_mask=False,\n # use_sphere_tracing=False,\n show_progress=False,\n **kwargs,\n ):\n \"\"\"Do batched inference on rays using chunk.\"\"\"\n B = rays_list[0].shape[0]\n results = defaultdict(list)\n for i in tqdm(range(0, B, self.chunk), disable=not show_progress):\n extra_chunk = dict()\n for k, v in kwargs.items():\n if isinstance(v, torch.Tensor) and \"autoexposure_\" not in 
k:\n extra_chunk[k] = v[i : i + self.chunk]\n else:\n extra_chunk[k] = v\n if self.model_type == \"NeRF\":\n rendered_ray_chunks = render_rays_multi(\n self.models,\n self.embeddings,\n [r[i : i + self.chunk] for r in rays_list],\n obj_id_list,\n self.N_samples,\n use_disp=False,\n perturb=0.001,\n # perturb=0.00,\n noise_std=0,\n N_importance=self.N_importance,\n chunk=self.chunk,\n white_back=True,\n individual_weight_for_coarse=True,\n obj_bg_relative_scale=self.bg_scale_factor / self.scale_factor,\n **extra_chunk,\n )\n elif self.model_type == \"NeuS\":\n rendered_ray_chunks = render_rays_multi_neus(\n self,\n self.models,\n [r[i : i + self.chunk] for r in rays_list],\n obj_id_list,\n noise_std=0,\n white_back=True,\n # white_back=False,\n # obj_bg_relative_scale=self.bg_scale_factor / self.scale_factor,\n hit_test_only=hit_test_only,\n need_normal=need_normal,\n use_sphere_tracing=use_sphere_tracing,\n safe_region_volume_rendering=safe_region_volume_rendering,\n refine_edge=refine_edge,\n refine_edge_obj_ids=refine_edge_obj_ids,\n render_mask=render_mask,\n extra_dict=extra_chunk,\n render_kwargs=self.render_kwargs_neus,\n )\n\n for k, v in rendered_ray_chunks.items():\n if to_cpu:\n results[k] += [v.cpu()]\n else:\n results[k] += [v]\n\n for k, v in results.items():\n results[k] = torch.cat(v, 0)\n return results\n\n def render_full_scene(\n self,\n pose: np.ndarray,\n idx: int,\n h: int,\n w: int,\n write_idx_on_image=True,\n return_raw_image=False,\n render_mask=False,\n refine_edge=False,\n use_sphere_tracing=True,\n safe_region_volume_rendering=False,\n show_progress=False,\n refine_edge_obj_ids=[],\n fovx_deg=0,\n ):\n extra_dict = dict()\n extra_dict[\"compute_3d_mask\"] = False\n extra_dict[\"is_eval\"] = True\n\n rays_list = []\n object_id_list = []\n\n if fovx_deg > 0:\n focal = (w / 2) / np.tan((fovx_deg / 2) / (180 / np.pi))\n print(\"focal =\", focal)\n directions = get_ray_directions(h, w, focal).cuda() # (h, w, 3)\n else:\n directions = get_ray_directions_equirectangular(h, w).cuda() # (h, w, 3)\n\n for obj_id in self.active_instance_id:\n # get object location\n # Two: object to world pose\n if obj_id == 0: # 0 denotes background\n Two = np.eye(4)\n Two[:3, 3] = self.bg_scene_center\n else: # other objects\n Two = torch.eye(4).cuda()\n Two[:3, :3] = rotation_6d_to_matrix(\n self.object_pose_dict[str(obj_id)][\"rot6d\"]\n )\n Two[:3, 3] = self.object_pose_dict[str(obj_id)][\"trans\"]\n Two = Two.detach().cpu().numpy()\n # pose: Twc\n # we need: Toc\n Twc = np.eye(4)\n Twc[:3, :4] = pose[:3, :4]\n\n Toc = np.linalg.inv(Two) @ Twc\n\n Toc = torch.from_numpy(Toc).float().cuda()[:3, :4]\n rays_o, rays_d = get_rays(directions, Toc)\n\n rays = self.generate_object_rays(rays_o, rays_d, obj_id)\n\n rays_list += [rays]\n object_id_list += [obj_id]\n\n # set image_attr for object code\n extra_dict[\"embedding_inst_{}\".format(obj_id)] = self.image_attrs[\n str(obj_id)\n ].embedding_instance(torch.ones_like(rays_o[..., 0]).long().cuda() * obj_id)\n # light code\n if self.optimize_light_env:\n if self.use_light_from_image_attr or obj_id in self.virtual_instance_id:\n if not hasattr(self, \"hard_code_light_id\"):\n self.hard_coded_light_id = 0\n extra_dict[\"embedding_light_{}\".format(obj_id)] = self.image_attrs[\n str(obj_id)\n ].embedding_light(\n torch.ones_like(rays_o[..., 0]).long().cuda()\n * self.hard_coded_light_id\n )\n else:\n extra_dict[\"embedding_light_{}\".format(obj_id)] = (\n self.light_code_dict[str(obj_id)]\n .view(1, -1)\n .expand(rays_o.shape[0], -1)\n )\n # 
appearance code\n if self.optimize_appearance_code and obj_id not in self.virtual_instance_id:\n if self.use_appearance_from_image_attr:\n extra_dict[\n \"embedding_appearance_{}\".format(obj_id)\n ] = self.image_attrs[str(obj_id)].embedding_appearance(\n torch.ones_like(rays_o[..., 0]).long().cuda() * 0\n )\n else:\n extra_dict[\"embedding_appearance_{}\".format(obj_id)] = (\n self.appearance_code_dict[str(obj_id)]\n .view(1, -1)\n .expand(rays_o.shape[0], -1)\n )\n\n # optimize exposure\n if self.optimize_exposure and obj_id not in self.virtual_instance_id:\n extra_dict[f\"autoexposure_{obj_id}\"] = self.autoexposure_param[\n str(obj_id)\n ]\n\n with torch.cuda.amp.autocast(enabled=True):\n with torch.no_grad():\n results = self.batched_inference_multi(\n rays_list,\n object_id_list,\n to_cpu=False,\n use_sphere_tracing=use_sphere_tracing,\n # use_sphere_tracing=True,\n safe_region_volume_rendering=safe_region_volume_rendering,\n refine_edge=refine_edge,\n render_mask=render_mask,\n show_progress=show_progress,\n **extra_dict,\n )\n img = results[f\"rgb_fine\"]\n img_pred = np.clip(img.view(h, w, 3).cpu().numpy(), 0, 1)\n img_pred_ = (img_pred * 255).astype(np.uint8)\n\n if return_raw_image:\n if render_mask:\n img_mask = results[f\"rendered_instance_mask\"]\n img_mask = (\n img_mask.view(h, w, 3)[:, :, 0]\n .cpu()\n .numpy()\n .round()\n .astype(np.uint16)\n )\n return img_pred_, img_mask\n return img_pred_ # raw image in [h, w, 3] np.uint8\n\n if write_idx_on_image:\n img_pred_ = cv2.putText(\n img_pred_,\n \"Iter: {:03d}\".format(idx),\n (20, 20),\n cv2.FONT_HERSHEY_SIMPLEX,\n 0.7,\n (255, 0, 0),\n 2,\n )\n\n imageio.imwrite(\n os.path.join(self.output_path, f\"{idx:06d}.multi_obj.png\"), img_pred_\n )\n if render_mask:\n img_mask = results[f\"rendered_instance_mask\"]\n img_mask = (\n img_mask.view(h, w, 3)[:, :, 0].cpu().numpy().round().astype(np.uint16)\n )\n cv2.imwrite(os.path.join(self.output_path, f\"{idx:06d}.seg.png\"), img_mask)\n\n def set_initial_object_poses_from_scene_meta(self, add_noise=True):\n self.object_pose_dict = {}\n\n for obj_id in self.active_instance_id:\n if obj_id == 0:\n continue\n obj_meta_info = get_object_meta_info(\n self.ig_data_base_dir, self.scene_meta, obj_id\n )\n if \"gt_T_wo\" in obj_meta_info:\n Two = obj_meta_info[\"gt_T_wo\"]\n else:\n print(\n f\"Cannot find object pose for obj_id = {obj_id}, use custom pose with minor offset.\"\n )\n Two = np.eye(4)\n from scipy.spatial.transform import Rotation as R\n\n rot_fix = np.array([1, 0, 0, 0, 0, 1, 0, -1, 0]).reshape(3, 3)\n # TODO: update initial pose for real-world scenes\n # if obj_id == 31:\n # blender_xyz = np.array([-1.44, 1.18, 0.1])\n # blender_rot = R.from_quat([0.5, -0.5, 0.5, 0.5]).as_matrix()\n # elif obj_id == 32:\n # blender_xyz = np.array([0.76, 0.54, 0.98])\n # blender_rot = R.from_quat([0.707107, 0, 0, 0.707107]).as_matrix()\n # elif obj_id == 33:\n # blender_xyz = np.array([-0.06, 1.01, -0.9])\n # blender_rot = R.from_quat([0, 0.707107, -0.707107, 0]).as_matrix()\n # elif obj_id == 34:\n # blender_xyz = np.array([-0.05, 1.14, -0.15])\n # blender_rot = R.from_quat([0, 0.707107, -0.707107, 0]).as_matrix()\n # elif obj_id == 35:\n # blender_xyz = np.array([-0.35, 1.1, 0.98])\n # blender_rot = R.from_quat([0.707107, 0, 0, 0.707107]).as_matrix()\n\n # Two[:3, :3] = blender_rot @ rot_fix\n # Two[:3, :3] = rot_fix @ blender_rot\n # Two[:3, 3] = rot_fix @ blender_xyz\n\n # Two[1, 3] += 0.75\n # Two[2, 3] -= 0.7\n\n # add noise\n if add_noise:\n Two[:3, 3] += 0.1\n from 
scipy.spatial.transform import Rotation as R\n\n rot_noise = R.from_euler(\"z\", 20, degrees=True).as_matrix()\n Two[:3, :3] = Two[:3, :3] @ rot_noise\n Two = torch.from_numpy(Two).float().cuda()\n\n # split parameters\n rot6d = matrix_to_rotation_6d(Two[:3, :3])\n trans = Two[:3, 3]\n rot6d.requires_grad = True\n trans.requires_grad = True\n\n self.object_pose_dict[str(obj_id)] = {\n \"trans\": trans,\n \"rot6d\": rot6d,\n }\n if \"fix_object_pose\" not in self.optimize_option:\n self.params += [{\"params\": trans, \"lr\": self.lr}]\n self.params += [{\"params\": rot6d, \"lr\": self.lr}]\n\n def set_initial_pose_from_prediction(self, pred_json_path):\n print(\"Initial pose from\", pred_json_path)\n self.object_pose_dict = {}\n self.initial_pose_prediction = {}\n pred_info = read_json(pred_json_path)\n for obj_id in self.active_instance_id:\n if obj_id == 0:\n continue\n Two = np.array(pred_info[str(obj_id)][\"Two\"])\n Two = torch.from_numpy(Two).float().cuda()\n self.initial_pose_prediction[str(obj_id)] = {\"Two\": Two.clone()}\n\n # split parameters\n rot6d = matrix_to_rotation_6d(Two[:3, :3])\n trans = Two[:3, 3]\n\n if not \"fix_object_pose\" in self.optimize_option:\n rot6d.requires_grad = True\n trans.requires_grad = True\n\n self.object_pose_dict[str(obj_id)] = {\n \"trans\": trans,\n \"rot6d\": rot6d,\n }\n self.params += [{\"params\": trans, \"lr\": self.lr}]\n self.params += [{\"params\": rot6d, \"lr\": self.lr}]\n\n def set_initial_pose_as_identity(self):\n print(\"Initial pose as identity.\")\n self.object_pose_dict = {}\n self.initial_pose_prediction = {}\n for obj_id in self.active_instance_id:\n if obj_id == 0:\n continue\n Two = np.eye(4)\n Two = torch.from_numpy(Two).float().cuda()\n self.initial_pose_prediction[str(obj_id)] = {\"Two\": Two.clone()}\n\n # split parameters\n rot6d = matrix_to_rotation_6d(Two[:3, :3])\n trans = Two[:3, 3]\n rot6d.requires_grad = True\n trans.requires_grad = True\n\n self.object_pose_dict[str(obj_id)] = {\n \"trans\": trans,\n \"rot6d\": rot6d,\n }\n self.params += [{\"params\": trans, \"lr\": self.lr}]\n self.params += [{\"params\": rot6d, \"lr\": self.lr}]\n\n def set_sampling_mask_from_seg(\n self,\n seg_mask=None,\n seg_mask_path=None,\n add_noise_to_seg=0,\n convert_seg_mask_to_box_mask=False,\n ):\n if seg_mask_path is not None:\n print(\"Read segmentation from gt mask\")\n # read mask\n self.instance_mask = get_instance_mask(seg_mask_path, img_wh=self.img_wh)\n elif seg_mask is not None:\n self.instance_mask = seg_mask\n else:\n print(\"Warning: empty mask\")\n self.merged_mask = (\n np.ones((self.img_wh[1], self.img_wh[0])).reshape(-1).astype(bool)\n )\n return\n\n # merge active object masks\n merged_mask = np.zeros_like(self.instance_mask)\n for i_obj, obj_id in enumerate(self.active_instance_id):\n if obj_id == 0:\n continue # do not accumulate background obj_id\n instance_mask_obj = self.instance_mask == obj_id\n # use tightly fit bbox instead of segmentation mask\n if convert_seg_mask_to_box_mask:\n instance_mask_obj = seg_mask_to_box_mask(instance_mask_obj)\n merged_mask = np.logical_or(merged_mask, instance_mask_obj)\n\n # if add noise to gt segmentation\n if add_noise_to_seg != 0:\n is_dilate = add_noise_to_seg > 0\n add_noise_to_seg = abs(add_noise_to_seg)\n kernel = np.ones((add_noise_to_seg, add_noise_to_seg), np.uint8)\n if is_dilate:\n merged_mask = cv2.dilate(\n merged_mask.astype(np.uint8), kernel, iterations=1\n ).astype(bool)\n else:\n merged_mask = cv2.erode(\n merged_mask.astype(np.uint8), kernel, iterations=1\n 
).astype(bool)\n cv2.imwrite(\n f\"{self.output_path}/merged_mask.png\", merged_mask.astype(np.uint8) * 255\n )\n self.merged_mask = merged_mask.reshape(-1)\n\n def get_type_of_instance(self, instance_id):\n for obj_info in self.scene_meta[\"objs\"]:\n if obj_info[\"id\"] == instance_id:\n return obj_info[\"classname\"]\n return \"unknown\"\n\n def generate_relation(\n self,\n obj_to_room_distance_th: float = 0.5,\n top_down_dist_th: float = 0.3,\n top_down_xy_close_factor: float = 0.8,\n ):\n \"\"\"\n Generate relationship : object-wall, object-floor, object-object\n \"\"\"\n print(\"Start to generate relation from initial poses and neural models...\")\n all_obj_info = {}\n for i, obj_id in enumerate(self.active_instance_id):\n if obj_id == 0:\n continue\n Rwo = rotation_6d_to_matrix(self.object_pose_dict[str(obj_id)][\"rot6d\"])\n two = self.object_pose_dict[str(obj_id)][\"trans\"]\n optimized_meta = get_object_meta_info(\n self.ig_data_base_dir, self.scene_meta, obj_id\n )\n optimized_meta.pop(\"gt_T_wo\", None) # pop gt\n # pass optimized object pose\n optimized_meta[\"Rwo\"] = Rwo\n optimized_meta[\"two\"] = two\n optimized_meta[\"obj_id\"] = obj_id\n all_obj_info[str(obj_id)] = optimized_meta\n with torch.no_grad():\n generate_relation_for_all(\n room_optimizer=self,\n all_obj_info=all_obj_info,\n obj_to_room_distance_th=obj_to_room_distance_th,\n top_down_dist_th=top_down_dist_th,\n top_down_xy_close_factor=top_down_xy_close_factor,\n )\n # print(\"Relation:\\n\", self.relation_info)\n for k, v in self.relation_info.items():\n print(k, v)\n\n def optimize(self, input_rgb: torch.Tensor, pose=None):\n \"\"\"\n Inputs:\n input_rgb: torch.Tensor [h, w, 3] normalized in 0...1\n \"\"\"\n if pose is None:\n pose = np.array(self.scene_meta[\"camera\"][\"cam3d2world\"]).reshape(4, 4)\n # Original poses has rotation in form \"right down forward\", change to NDC \"right up back\"\n fix_rot = np.array([1, 0, 0, 0, -1, 0, 0, 0, -1]).reshape(3, 3)\n pose[:3, :3] = pose[:3, :3] @ fix_rot\n\n # camera to world pose\n Twc = np.eye(4)\n Twc[:3, :4] = pose[:3, :4]\n Twc = torch.from_numpy(Twc).float().cuda()\n\n if \"keypoint_mask\" in self.optimize_option:\n # detect keypoint for interest region\n keypoint_mask = detect_keypoints(input_rgb.numpy(), circle_radius=5)\n self.merged_mask = np.logical_and(\n keypoint_mask, self.merged_mask.reshape(keypoint_mask.shape)\n )\n cv2.imwrite(\n f\"{self.output_path}/merged_mask_keypoint.png\",\n self.merged_mask.astype(np.uint8) * 255,\n )\n self.merged_mask = self.merged_mask.reshape(-1)\n\n input_rgb = input_rgb.view(-1, 3) # (H*W, 3) RGB\n\n directions = get_ray_directions_equirectangular(\n self.h, self.w\n ).cuda() # (h, w, 3)\n\n mse_loss = nn.MSELoss(reduction=\"none\")\n\n assert hasattr(\n self, \"params\"\n ), \"Please set initial pose params before optimization.\"\n optimizer = torch.optim.Adam(self.params)\n\n scaler = torch.cuda.amp.GradScaler(enabled=self.use_amp)\n perceptual_net = perceptual_model.VGG16_for_Perceptual().cuda()\n\n sample_prob = pano_sample_probability(self.h, self.w).reshape(-1)\n\n t = trange(self.N_optim_step, desc=\"Opt.\", leave=True)\n for i_step in t:\n if \"regenerate_relation_during_test\" in self.optimize_option:\n if i_step != 0 and i_step % 50 == 0:\n self.generate_relation()\n if self.adjust_lr_per_step > 0:\n adjust_learning_rate(\n self.lr,\n optimizer,\n i_step,\n base=0.5,\n adjust_lr_every=self.adjust_lr_per_step,\n )\n extra_dict = dict()\n rays_list = []\n object_id_list = []\n # sample according to batch size 
limitation\n select_ind = np.arange(self.merged_mask.shape[0])[self.merged_mask]\n if (\n \"perceptual_loss\" not in self.optimize_option\n ): # we only sample some points in this case\n # sample according to pano distribution\n select_sample_prob = sample_prob[self.merged_mask]\n select_sample_prob /= select_sample_prob.sum()\n # assert select_ind.shape[0] > self.optim_batch_size\n sample_size = min(select_ind.shape[0], self.optim_batch_size)\n select_ind = np.random.choice(\n select_ind,\n size=sample_size,\n replace=False,\n p=select_sample_prob,\n )\n\n # add some sampling on the background for bg light code\n if self.optimize_light_env:\n bg_sample_ratio = 0.2\n bg_sample_prob = sample_prob[~self.merged_mask]\n bg_sample_prob /= bg_sample_prob.sum()\n bg_sample_ind = np.arange(self.merged_mask.shape[0])[~self.merged_mask]\n # assert bg_sample_ind.shape[0] > self.optim_batch_size\n bg_sample_size = min(\n bg_sample_ind.shape[0], int(bg_sample_ratio * self.optim_batch_size)\n )\n if bg_sample_size > 0:\n bg_sample_ind = np.random.choice(\n bg_sample_ind,\n size=bg_sample_size,\n replace=False,\n p=bg_sample_prob,\n )\n select_ind = np.concatenate([select_ind, bg_sample_ind], axis=-1)\n\n select_ind = np.unique(select_ind)\n if i_step == 0:\n print(\"Actual optimization rays\", select_ind.shape[0])\n select_input_rgb = input_rgb[select_ind].float().cuda()\n\n loss_dict = {}\n all_obj_info = {} # prepare for violation loss\n\n for i, obj_id in enumerate(self.active_instance_id):\n # object to world pose\n if obj_id == 0:\n Rwo = torch.eye(3).cuda()\n two = torch.from_numpy(self.bg_scene_center).float().cuda()\n else:\n Rwo = rotation_6d_to_matrix(\n self.object_pose_dict[str(obj_id)][\"rot6d\"]\n )\n two = self.object_pose_dict[str(obj_id)][\"trans\"]\n\n # camera to object pose\n Toc = torch.eye(4).cuda()\n Toc[:3, :3] = Rwo.T @ Twc[:3, :3]\n Toc[:3, 3] = Rwo.T @ (Twc[:3, 3] - two)\n\n # generate object rays\n rays_o, rays_d = get_rays(directions, Toc[:3, :4])\n\n rays_o = rays_o[select_ind]\n rays_d = rays_d[select_ind]\n\n rays = self.generate_object_rays(rays_o, rays_d, obj_id)\n rays_list += [rays]\n object_id_list += [obj_id]\n\n # set image_attr for object code\n extra_dict[\"embedding_inst_{}\".format(obj_id)] = self.image_attrs[\n str(obj_id)\n ].embedding_instance(\n torch.ones_like(rays_o[..., 0]).long().cuda() * obj_id\n )\n # light code\n if self.optimize_light_env:\n if self.use_light_from_image_attr:\n extra_dict[\n \"embedding_light_{}\".format(obj_id)\n ] = self.image_attrs[str(obj_id)].embedding_light(\n torch.ones_like(rays_o[..., 0]).long().cuda()\n * self.hard_coded_light_id\n )\n else:\n extra_dict[\"embedding_light_{}\".format(obj_id)] = (\n self.light_code_dict[str(obj_id)]\n .view(1, -1)\n .expand(rays_o.shape[0], -1)\n )\n # appearance code\n if self.optimize_appearance_code:\n if self.use_appearance_from_image_attr:\n extra_dict[\n \"embedding_appearance_{}\".format(obj_id)\n ] = self.image_attrs[str(obj_id)].embedding_appearance(\n torch.ones_like(rays_o[..., 0]).long().cuda() * 0\n )\n else:\n extra_dict[\"embedding_appearance_{}\".format(obj_id)] = (\n self.appearance_code_dict[str(obj_id)]\n .view(1, -1)\n .expand(rays_o.shape[0], -1)\n )\n # autoexposure\n if self.optimize_exposure:\n extra_dict[f\"autoexposure_{obj_id}\"] = self.autoexposure_param[\n str(obj_id)\n ]\n\n # we do not need to add relation constraints to bg\n if obj_id == 0:\n continue\n\n # enforce optimising on yaw\n if \"z_axis_align_loss\" in self.optimize_option:\n 
loss_dict[\"z_axis_loss_{}\".format(obj_id)] = (\n z_axis_loss(Rwo, 1.0) * 1e2\n )\n\n optimized_meta = get_object_meta_info(\n self.ig_data_base_dir, self.scene_meta, obj_id\n )\n optimized_meta.pop(\"gt_T_wo\", None) # pop gt\n # pass optimized object pose\n optimized_meta[\"Rwo\"] = Rwo\n optimized_meta[\"two\"] = two\n optimized_meta[\"obj_id\"] = obj_id\n obj_id_key = str(obj_id)\n\n if obj_id_key not in self.relation_info:\n continue\n\n # get obj_relation from input\n obj_relation = self.relation_info[obj_id_key]\n # supplement obj_type\n obj_type = self.get_type_of_instance(obj_id)\n optimized_meta[\"obj_type\"] = obj_type\n\n all_obj_info[str(obj_id)] = optimized_meta\n\n with torch.cuda.amp.autocast(enabled=self.use_amp):\n \"\"\"attach wall loss\"\"\"\n if (\n \"object_room_wall_attach\" in self.optimize_option\n and obj_relation.get(\"attach_wall\", False)\n ):\n kwargs = {\n \"room_optimizer\": self,\n \"obj_info\": optimized_meta,\n # \"face_direction\": torch.Tensor([0, 1, 0]),\n # \"face_direction\": obj_relation.get(\n # \"attach_wall_face_dir\", torch.Tensor([0, 1, 0])\n # ),\n \"face_direction\": obj_relation[\"attach_wall_face_dir\"],\n \"ray_grid_size\": 10,\n }\n # for door object, we slightly stretch the size to ensure successive hit-test\n if obj_type == \"door\" or obj_type == \"window\":\n kwargs.update(\n {\n \"ray_grid_stretch\": torch.Tensor([1.2, 1.2, 1]),\n \"use_bbox_surface_as_in_detect\": True,\n }\n )\n loss_dict.update(object_room_magnetic_loss(**kwargs))\n\n \"\"\"attach floor loss\"\"\"\n if (\n \"object_room_floor_attach\" in self.optimize_option\n and obj_relation.get(\"attach_floor\", False)\n ):\n # # TODO(ybbbbt): hard code floor\n # loss_dict.update(\n # obj_attach_floor_loss(optimized_meta, floor=0.0)\n # )\n kwargs = {\n \"room_optimizer\": self,\n \"obj_info\": optimized_meta,\n \"face_direction\": torch.Tensor([0, 0, -1]),\n \"ray_grid_stretch\": torch.Tensor(\n [0.8, 0.8, 1.0]\n ), # avoid too close to wall\n \"use_bbox_surface_as_in_detect\": True,\n \"ray_grid_size\": 3,\n }\n if obj_type == \"door\":\n # kwargs[\"ray_grid_offset\"] = torch.Tensor(\n # [0, -0.3, 0]\n # ) # to avoid to close to wall\n assert (\n \"attach_wall_face_dir\" in obj_relation\n ), f\"door {obj_id} relation prediction failed.\"\n kwargs[\"ray_grid_offset\"] = (\n obj_relation[\"attach_wall_face_dir\"] * -0.3\n ) # to avoid to close to wall\n loss_dict.update(object_room_magnetic_loss(**kwargs))\n\n with torch.cuda.amp.autocast(enabled=self.use_amp):\n results = self.batched_inference_multi(\n rays_list,\n object_id_list,\n to_cpu=False,\n # use_sphere_tracing=True,\n use_sphere_tracing=False,\n **extra_dict,\n )\n pred_rgb = results[\"rgb_fine\"]\n\n if \"photometric_loss\" in self.optimize_option:\n loss_dict[\"mse_loss\"] = mse_loss(pred_rgb, select_input_rgb).mean()\n\n if \"visualize_pred\" in self.optimize_option: # dump image for debug\n # pred_rgb_full = input_rgb.cuda()\n pred_rgb_full = torch.zeros_like(input_rgb.cuda())\n pred_rgb_full[select_ind] = pred_rgb\n\n imageio.imwrite(\n f\"debug/pred_rgb_full.png\",\n (pred_rgb_full * 255)\n .view(self.img_wh[1], self.img_wh[0], 3)\n .detach()\n .cpu()\n .numpy()\n .astype(np.uint8),\n )\n\n if \"perceptual_loss\" in self.optimize_option:\n pred_rgb_full = input_rgb.cuda()\n pred_rgb_full[select_ind] = pred_rgb\n loss_dict.update(\n patch_perceptual_loss(\n perceptual_net,\n pred_rgb_full,\n input_rgb,\n all_obj_info,\n self.instance_mask,\n self.img_wh,\n )\n )\n\n \"\"\"attach bottom to other object 
loss\"\"\"\n if \"object_object_attach\" in self.optimize_option:\n for obj_id_str, obj_relation in self.relation_info.items():\n if obj_relation.get(\"attach_bottom_to_object\", False):\n kwargs = {\n \"room_optimizer\": self,\n \"obj_info_src\": all_obj_info[obj_id_str],\n \"obj_info_tgt\": all_obj_info[\n str(obj_relation[\"attach_tgt_obj_id\"])\n ],\n \"face_direction\": torch.Tensor([0, 0, -1]),\n }\n loss_dict.update(object_object_attach_loss(**kwargs))\n\n # physical violation loss\n if \"physical_violation\" in self.optimize_option:\n if (\n not \"physical_violation_delayed_start\" in self.optimize_option\n or i_step >= 100\n ):\n loss_dict.update(\n physical_violation_loss(\n self,\n all_obj_info,\n N_nearest_obj=3,\n check_background_violation=True,\n # N_sample_points=1000,\n N_sample_points=2000,\n # N_sample_points=300,\n )\n )\n\n if \"viewing_constraint\" in self.optimize_option:\n loss_dict.update(viewing_constraint_loss(self, Twc, all_obj_info))\n\n if \"print_loss_dict\" in self.optimize_option:\n for k, v in loss_dict.items():\n # if \"_62\" not in k:\n # continue\n print(k, \"=\", float(v))\n loss = sum(list(loss_dict.values()))\n scaler.scale(loss).backward()\n scaler.step(optimizer)\n scaler.update()\n optimizer.zero_grad()\n\n t.set_description(\"Loss: %f\" % float(loss))\n t.refresh()\n # dump image\n if i_step % 20 == 0:\n self.save_optimizable_parameters(\n f\"{self.output_path}/{i_step:06d}.state.ckpt\"\n )\n # self.load_optimizable_parameters(\n # f\"{self.output_path}/{i_step:06d}.state.ckpt\"\n # )\n if i_step >= self.N_optim_step - 20:\n self.render_full_scene(\n pose=pose,\n idx=i_step,\n write_idx_on_image=False,\n render_mask=True,\n h=512,\n w=1280,\n )\n else:\n self.render_full_scene(\n pose=pose,\n idx=i_step,\n render_mask=False,\n h=self.h,\n w=self.w,\n )\n dump_optimization_meta_to_file(\n filepath=f\"{self.output_path}/{i_step:06d}.optim.json\",\n obj_pose_dict=self.object_pose_dict,\n )" }, { "identifier": "read_real_scene_localization", "path": "optim/misc_utils.py", "snippet": "def read_real_scene_localization(pose_path: str, transform_info_json_path: str):\n pose_dict = {}\n transform_info = read_json(transform_info_json_path)\n trans_colmap_to_arkit = np.array(transform_info[\"transform_colmap_to_arkit_sRT\"])\n trans_align = np.array(transform_info[\"transform_alignment\"])\n with open(pose_path) as file:\n lines = file.readlines()\n lines = lines[1:]\n for line in lines:\n fname, tx, ty, tz, qx, qy, qz, qw, _, _ = line.strip().split(\" \")\n fname += \".png\"\n pose = np.eye(4)\n pose[0, 3] = tx\n pose[1, 3] = ty\n pose[2, 3] = tz\n # Twc\n pose[:3, :3] = Rotation.from_quat([qx, qy, qz, qw]).as_matrix()\n # pose = np.linalg.inv(pose)\n # pose_ndc = np.linalg.inv(pose_ndc)\n\n # convert to ndc\n # pose_ndc = pose\n # fix_rot = np.array([1, 0, 0, 0, -1, 0, 0, 0, -1]).reshape(3, 3)\n # pose_ndc[:3, :3] = pose_ndc[:3, :3] @ fix_rot\n\n # transform to arkit pose\n s, R, t = decompose_to_sRT(trans_colmap_to_arkit)\n # pose_ndc = transform_colmap_to_arkit @ pose_ndc\n # print(s, R, t)\n pose[:3, 3] = R @ (pose[:3, 3] * s) + t\n pose[:3, :3] = R @ pose[:3, :3]\n\n # apply alignment to poses\n pose = trans_align @ pose\n\n pose_dict[fname] = {\"pose_slam_Twc\": pose}\n # print(fname, pose)\n return pose_dict" }, { "identifier": "read_real_scene_localization_with_name", "path": "optim/misc_utils.py", "snippet": "def read_real_scene_localization_with_name(arrangement_name):\n localization_info = read_real_scene_localization(\n 
f\"data/real_room_0/arrangement_panorama_select/{arrangement_name}/traj.txt\",\n \"data/real_room_0/objects/000/background_hloc_neus_normal_converge/transform_info.json\",\n )\n return localization_info" }, { "identifier": "read_testing_config", "path": "optim/misc_utils.py", "snippet": "def read_testing_config():\n conf_cli = OmegaConf.from_cli()\n conf_test_file = OmegaConf.load(conf_cli.config)\n # read dataset config\n conf_test_file[\"dataset_config\"] = read_dataset_config_file(\n conf_test_file[\"dataset_config_path\"]\n )\n conf_test_file[\"bg_dataset_config\"] = read_dataset_config_file(\n conf_test_file[\"bg_dataset_config_path\"]\n )\n\n # processing ckpt\n ckpt_path_dict = {}\n for item in conf_test_file[\"ckpt_lists\"]:\n path = item[\"path\"]\n obj_ids = item[\"obj_ids\"]\n neus_conf = item.get(\"neus_conf\", \"config/neus.yaml\")\n for obj_id in obj_ids:\n ckpt_path_dict[str(obj_id)] = {\"path\": path, \"neus_conf\": neus_conf}\n conf_test_file[\"ckpt_path_dict\"] = ckpt_path_dict\n\n conf_merged = OmegaConf.merge(conf_test_file, conf_cli)\n return conf_merged" }, { "identifier": "read_json", "path": "utils/util.py", "snippet": "def read_json(fname):\n fname = Path(fname)\n with fname.open(\"rt\") as handle:\n return json.load(handle, object_hook=OrderedDict)" } ]
import sys
import os
import torch
import numpy as np
from PIL import Image
from omegaconf import OmegaConf
from optim.room_optimizer import RoomOptimizer
from optim.misc_utils import (
    read_real_scene_localization,
    read_real_scene_localization_with_name,
    read_testing_config,
)
from utils.util import read_json
14,166
os.environ["OMP_NUM_THREADS"] = "1" # noqa os.environ["MKL_NUM_THREADS"] = "1" # noqa sys.path.append(".") # noqa def main(config): # active_instance_id = config.active_instance_id scene_info_json_path = config.scene_info_json active_instance_id = []
os.environ["OMP_NUM_THREADS"] = "1" # noqa os.environ["MKL_NUM_THREADS"] = "1" # noqa sys.path.append(".") # noqa def main(config): # active_instance_id = config.active_instance_id scene_info_json_path = config.scene_info_json active_instance_id = []
for obj_info in read_json(scene_info_json_path)["objs"]:
4
2023-10-15 08:41:29+00:00
16k
WenzhengZhang/Seq2seqCoref
trainer.py
[ { "identifier": "CorefAllMetrics", "path": "metrics.py", "snippet": "class CorefAllMetrics(object):\n \"\"\"\n Wrapper for coreference resolution metrics.\n \"\"\"\n\n @staticmethod\n def _get_mention_to_x(clusters: List[list]) -> dict:\n mention_to_x = {}\n for cluster in clusters:\n for m in cluster:\n mention_to_x[m] = tuple(cluster)\n return mention_to_x\n\n def _compute_mention_detect_metrics(self, gold_clusters: List[list],\n predicted_clusters: List[list]):\n # mention detection evaluation\n mention_evaluator = MentionEvaluator()\n results = {}\n predicted_mentions = list(self._get_mention_to_x(\n predicted_clusters).keys())\n gold_mentions = list(self._get_mention_to_x(gold_clusters).keys())\n mention_evaluator.update(predicted_mentions, gold_mentions)\n mention_precision, mention_recall, mention_f1 = \\\n mention_evaluator.get_prf()\n results['precision'] = mention_precision\n results['recall'] = mention_recall\n results['f1'] = mention_f1\n return results\n\n def _compute_coref_metrics(self, gold_clusters: List[list],\n predicted_clusters: List[list]) \\\n -> Dict[str, Dict[str, float]]:\n \"\"\"\n Compute all coreference metrics given a list of gold cluster and a list of predicted clusters.\n \"\"\"\n mention_to_predicted = self._get_mention_to_x(predicted_clusters)\n mention_to_gold = self._get_mention_to_x(gold_clusters)\n result = {}\n metric_name_evals = [('muc', Evaluator(muc)),\n ('b_cubed', Evaluator(b_cubed)),\n ('ceaf', Evaluator(ceafe))]\n\n for name, evaluator in metric_name_evals:\n evaluator.update(predicted_clusters, gold_clusters,\n mention_to_predicted, mention_to_gold)\n result[name] = {\n 'precision': evaluator.get_precision(),\n 'recall': evaluator.get_recall(),\n 'f1': evaluator.get_f1()\n }\n\n result['average'] = {\n 'precision': sum(\n [result[k]['precision'] for k, _ in metric_name_evals]) / len(\n metric_name_evals),\n 'recall': sum(\n [result[k]['recall'] for k, _ in metric_name_evals]) / len(\n metric_name_evals),\n 'f1': sum([result[k]['f1'] for k, _ in metric_name_evals]) / len(\n metric_name_evals)\n }\n\n return result\n\n @staticmethod\n def _average_nested_dict(\n list_nested_dict: List[Dict[str, Dict[str, float]]]) -> Dict[\n str, Dict[str, float]]:\n \"\"\"\n Given a list of 2-level nested dict, compute the average.\n \"\"\"\n result_dict = {}\n\n # sum up all values\n for outer_dict in list_nested_dict:\n for key_outer, value_outer in outer_dict.items():\n if key_outer not in result_dict:\n result_dict[key_outer] = {}\n for key_inner, value_inner in value_outer.items():\n result_dict[key_outer][key_inner] = result_dict[\n key_outer].get(\n key_inner, 0.0) + value_inner\n\n # take the average\n for key_outer, value_outer in result_dict.items():\n for key_inner, value_inner in value_outer.items():\n result_dict[key_outer][key_inner] = result_dict[key_outer][\n key_inner] / len(\n list_nested_dict)\n\n return result_dict\n\n def get_all_metrics(self, labels: List[List[List[Tuple[int, int]]]],\n preds: List[List[List[Tuple[int, int]]]]) \\\n -> Dict[str, Dict[str, Dict[str, float]]]:\n \"\"\"\n Compute all metrics for coreference resolution.\n In input are given two list of mention groups, for example:\n [ # this is the corpus level, with a list of documents\n [ # this is the document level, with a list of mention clusters\n [ # this is the cluster level, with a list of spans\n (5, 7),\n (11, 19),\n ...\n ],\n ...\n ]\n ]\n \"\"\"\n assert len(labels) == len(preds)\n result = {}\n\n # compute micro-averaged scores (treat all clusters from all 
docs as a single list of clusters)\n gold_clusters = [\n [(i,) + span for span in cluster] for i, clusters in\n enumerate(labels) for cluster in clusters\n ]\n predicted_clusters = [\n [(i,) + span for span in cluster] for i, clusters in\n enumerate(preds) for cluster in clusters\n ]\n coref_ment_results = self._compute_coref_metrics(gold_clusters,\n predicted_clusters)\n ment_results = self._compute_mention_detect_metrics(gold_clusters,\n predicted_clusters)\n coref_ment_results['mention_detect'] = ment_results\n result['micro'] = coref_ment_results\n\n # compute macro-averaged scores (compute p/r/f1 for each doc first, then take average per doc)\n doc_metrics = []\n for gold_clusters, predicted_clusters in zip(labels, preds):\n doc_metrics.append(self._compute_coref_metrics(\n gold_clusters, predicted_clusters\n ))\n result['macro'] = self._average_nested_dict(doc_metrics)\n\n return result" }, { "identifier": "get_document_predicts", "path": "data.py", "snippet": "def get_document_predicts(doc_preds: List[List]) -> List[\n List[Tuple[int, int]]]:\n \"\"\"\n Aggregate predictions for each chunk into document-level predictions.\n \"\"\"\n if len(doc_preds) == 0:\n return []\n graph = nx.compose_all([nx.complete_graph(p) for p in doc_preds])\n\n processed_groups = []\n for component in nx.connected_components(graph):\n processed_group = []\n for start, end in sorted(component, key=lambda x: (x[0], -x[1])):\n # add this entity if it does not overlap with the previous one\n condition = not any(\n [s < start < e < end for (s, e) in processed_group])\n # if len(processed_group) == 0 or start >= processed_group[-1][1]:\n # processed_group.append((start, end))\n if len(processed_group) == 0 or condition:\n processed_group.append((start, end))\n\n processed_groups.append(processed_group)\n\n return [[(start, end) for start, end in group] for group in\n processed_groups]" }, { "identifier": "parse_int_output_tokens", "path": "data.py", "snippet": "def parse_int_output_tokens(input_ids, output_ids,\n special_ids, subtoken_map, tokenizer,\n thred, is_tagging):\n rec_ids, new_id = [], -1\n ment_start_stack = []\n unmatched_clusters = defaultdict(list)\n new_output_ids = []\n if is_tagging:\n new_input_ids = [special_ids['copy'] for t in input_ids if\n t != tokenizer.pad_token_id and t != special_ids[\n 'eos']]\n new_input_ids.append(special_ids['eos'])\n else:\n new_input_ids = [t for t in input_ids if t != tokenizer.pad_token_id]\n token_mentions = []\n for i in range(len(output_ids)):\n if output_ids[i] == tokenizer.pad_token_id:\n break\n if output_ids[i] == special_ids['mention_start']:\n new_id += 1\n ment_start_stack.append([new_id, 'name', []])\n if is_tagging:\n new_output_ids.append(output_ids[i])\n elif output_ids[i] == special_ids['mention_end']:\n new_id += 0\n if is_tagging:\n new_output_ids.append(output_ids[i])\n if len(ment_start_stack) > 0:\n item = ment_start_stack.pop()\n if item[1] == \"ent\":\n unmatched_clusters[tuple(item[-1])].append(\n (item[0], new_id))\n else:\n # a normal token\n # if output_ids[i] == special_ids['sep']:\n # status = \"ent\"\n if len(ment_start_stack) > 0:\n # inside some entities\n if output_ids[i] == special_ids['sep']:\n ment_start_stack[-1][1] = \"ent\"\n if is_tagging:\n new_output_ids.append(output_ids[i])\n else:\n if ment_start_stack[-1][1] == 'ent':\n ment_start_stack[-1][2].append(output_ids[i])\n if is_tagging:\n new_output_ids.append(output_ids[i])\n elif ment_start_stack[-1][1] == 'name':\n new_id += 1\n rec_ids.append(output_ids[i])\n if 
is_tagging:\n new_output_ids.append(input_ids[new_id])\n else:\n raise ValueError('wrong status')\n else:\n # outside\n new_id += 1\n rec_ids.append(output_ids[i])\n if is_tagging:\n new_output_ids.append(input_ids[new_id])\n if output_ids[i] == special_ids['mention_start']:\n new_id -= 1\n # thred = 1 if allow_singletons else 2\n # Needleman-Wunsch text alignment algorithm\n wrong_reconstruction = (rec_ids != new_input_ids)\n if wrong_reconstruction:\n print(f'new input ids {new_input_ids}')\n print(f'reconstructed ids {rec_ids}')\n print(f'out ids {output_ids}')\n print('wrong reconstruction! please debug')\n matching = global_align(new_input_ids, rec_ids)\n\n # update predicted entities with the positions in the original sentence\n clusters = defaultdict(list)\n\n for ent_id, ments in unmatched_clusters.items():\n for start, end in ments:\n new_start = None # start in the original sequence\n new_end = None # end in the original sequence\n\n for j in range(start, end + 1):\n if j in matching:\n if new_start is None:\n new_start = matching[j]\n\n new_end = matching[j]\n\n if new_start is not None:\n # predict entity\n clusters[ent_id].append((\n subtoken_map[new_start], subtoken_map[new_end]))\n token_mentions.append((new_start, new_end))\n predict_clusters = [list(set(v)) for k, v in clusters.items() if\n len(set(v)) >= thred]\n token_mentions = list(set(token_mentions))\n else:\n clusters = [[(subtoken_map[m[0]], subtoken_map[m[1]]) for m in v] for v\n in\n unmatched_clusters.values()]\n predict_clusters = [list(set(v)) for v in clusters if len(set(v)) >=\n thred]\n token_mentions = [(m[0], m[1]) for v in unmatched_clusters.values()\n for m in v]\n token_mentions = list(set(token_mentions))\n if not is_tagging:\n new_output_ids = output_ids\n return predict_clusters, token_mentions, new_output_ids" }, { "identifier": "parse_short_target_tokens", "path": "data.py", "snippet": "def parse_short_target_tokens(input_ids, output_ids,\n special_ids, subtoken_map, tokenizer,\n align_mode, thred, split_sentence):\n # support mark sentence, align sentence by sentence\n rec_ids, new_id = [], -1\n ment_start_stack = []\n unmatched_clusters = defaultdict(list)\n new_input_ids = [t for t in input_ids if t != tokenizer.pad_token_id]\n for i in range(len(output_ids)):\n if output_ids[i] == tokenizer.pad_token_id:\n break\n if output_ids[i] == special_ids['mention_start']:\n ment_start_stack.append([new_id + 1, 'name', []])\n elif output_ids[i] == special_ids['mention_end']:\n if len(ment_start_stack) > 0:\n item = ment_start_stack.pop()\n if item[1] == \"ent\":\n unmatched_clusters[tuple(item[-1])].append(\n (item[0], new_id))\n else:\n # a normal token\n if len(ment_start_stack) > 0:\n # inside some entities\n if output_ids[i] == special_ids['sep']:\n ment_start_stack[-1][1] = \"ent\"\n else:\n if ment_start_stack[-1][1] == 'ent':\n ment_start_stack[-1][2].append(output_ids[i])\n elif ment_start_stack[-1][1] == 'name':\n new_id += 1\n rec_ids.append(output_ids[i])\n else:\n raise ValueError('wrong status')\n\n else:\n # outside\n new_id += 1\n rec_ids.append(output_ids[i])\n # mapping.append(new_id)\n # thred = 1 if allow_singletons else 2\n # Affine global text alignment algorithm\n if split_sentence:\n input_sents = split_list(\n new_input_ids, special_ids['sentence_start'], True)\n out_sents = split_list(rec_ids, special_ids['sentence_start'], True)\n try:\n assert len(input_sents) == len(out_sents)\n aligned_input_ids, aligned_rec_ids, matching = [], [], {}\n input_offset, out_offset = 0, 0\n for 
input_sent, out_sent in zip(input_sents, out_sents):\n aligned_input_sent, aligned_out_sent, sent_match = \\\n affine_global_align(input_sent, out_sent,\n special_ids['copy'],\n align_mode)\n aligned_input_ids.extend(aligned_input_sent)\n aligned_rec_ids.extend(aligned_out_sent)\n matching.update(\n {k + out_offset: v + input_offset for k, v in\n sent_match.items()})\n input_offset += len(input_sent)\n out_offset += len(out_sent)\n except AssertionError:\n print(f'input sents and out sents different length '\n f'{len(input_sents)} vs {len(out_sents)}, have to use '\n f'global alignment')\n aligned_input_ids, aligned_rec_ids, matching = affine_global_align(\n new_input_ids, rec_ids, special_ids['copy'], align_mode)\n else:\n aligned_input_ids, aligned_rec_ids, matching = affine_global_align(\n new_input_ids, rec_ids, special_ids['copy'], align_mode)\n # update predicted entities with the positions in the original sentence\n clusters = defaultdict(list)\n\n for ent_id, ments in unmatched_clusters.items():\n for start, end in ments:\n new_start = None # start in the original sequence\n new_end = None # end in the original sequence\n\n for j in range(start, end + 1):\n if j in matching:\n if new_start is None:\n new_start = matching[j]\n\n new_end = matching[j]\n\n if new_start is not None:\n # predict entity\n clusters[ent_id].append((\n subtoken_map[new_start], subtoken_map[new_end]))\n predict_clusters = [list(set(v)) for k, v in clusters.items() if\n len(set(v)) >= thred]\n return predict_clusters, aligned_input_ids, aligned_rec_ids" }, { "identifier": "parse_nonint_output_tokens", "path": "data.py", "snippet": "def parse_nonint_output_tokens(input_ids, output_ids,\n special_ids, subtoken_map,\n tokenizer,\n add_mention_end,\n thred):\n rec_ids, new_id = [], -1\n ment_start_stack = []\n unmatched_clusters = defaultdict(list)\n new_input_ids = [t for t in input_ids if t != tokenizer.pad_token_id]\n token_mentions = []\n for i in range(len(output_ids)):\n if output_ids[i] == tokenizer.pad_token_id:\n break\n if output_ids[i] == special_ids['mention_start']:\n new_id += 1\n ment_start_stack.append(new_id)\n elif add_mention_end and output_ids[i] == special_ids['mention_end']:\n assert output_ids[i + 1] in special_ids['cluster_ids_to_num']\n cid = special_ids['cluster_ids_to_num'][output_ids[i + 1]]\n if len(ment_start_stack) > 0:\n item = ment_start_stack.pop()\n unmatched_clusters[cid].append((item, new_id))\n elif output_ids[i] in special_ids['cluster_ids_to_num']:\n if not add_mention_end:\n cid = special_ids['cluster_ids_to_num'][output_ids[i]]\n if len(ment_start_stack) > 0:\n item = ment_start_stack.pop()\n unmatched_clusters[cid].append((item, new_id))\n else:\n new_id += 1\n rec_ids.append(output_ids[i])\n if output_ids[i] == special_ids['mention_start']:\n new_id -= 1\n # Needleman-Wunsch text alignment algorithm\n wrong_reconstruction = (rec_ids != new_input_ids)\n # thred = 1 if allow_singletons else 2\n if wrong_reconstruction:\n print(f'new input ids {new_input_ids}')\n print(f'reconstructed ids {rec_ids}')\n print(f'out ids {output_ids}')\n print('wrong reconstruction! 
please debug')\n matching = global_align(new_input_ids, rec_ids)\n\n # update predicted entities with the positions in the original sentence\n clusters = defaultdict(list)\n\n for ent_id, ments in unmatched_clusters.items():\n for start, end in ments:\n new_start = None # start in the original sequence\n new_end = None # end in the original sequence\n\n for j in range(start, end + 1):\n if j in matching:\n if new_start is None:\n new_start = matching[j]\n\n new_end = matching[j]\n\n if new_start is not None:\n # predict entity\n clusters[ent_id].append((\n subtoken_map[new_start], subtoken_map[new_end]))\n token_mentions.append((new_start, new_end))\n predict_clusters = [list(set(v)) for k, v in clusters.items() if\n len(set(v)) >= thred]\n token_mentions = list(set(token_mentions))\n else:\n clusters = [[(subtoken_map[m[0]], subtoken_map[m[1]]) for m in v] for v\n in\n unmatched_clusters.values()]\n predict_clusters = [list(set(v)) for v in clusters if len(set(v)) >=\n thred]\n token_mentions = [(m[0], m[1]) for v in unmatched_clusters.values()\n for m in v]\n token_mentions = list(set(token_mentions))\n return predict_clusters, token_mentions, output_ids" }, { "identifier": "SPECIAL_IDS", "path": "constants.py", "snippet": "SPECIAL_IDS = {\n 'speaker_start': int_tokenizer.encode(SPEAKER_START,\n add_special_tokens=False)[0],\n 'speaker_end': int_tokenizer.encode(SPEAKER_END, add_special_tokens=False)[\n 0],\n 'mention_start': int_tokenizer.encode(MENTION_START,\n add_special_tokens=False)[0],\n 'mention_end': int_tokenizer.encode(MENTION_END, add_special_tokens=False)[\n 0],\n 'sep': int_tokenizer.encode(SEP_TOKEN, add_special_tokens=False)[0],\n 'copy': int_tokenizer.encode(COPY, add_special_tokens=False)[0],\n 'eos': int_tokenizer.eos_token_id\n}" }, { "identifier": "MARK_SPECIAL_IDS", "path": "constants.py", "snippet": "MARK_SPECIAL_IDS = deepcopy(SPECIAL_IDS)" }, { "identifier": "NON_INT_SPECIAL_IDS", "path": "constants.py", "snippet": "NON_INT_SPECIAL_IDS = {\n 'speaker_start': non_int_tokenizer.encode(\n SPEAKER_START,\n add_special_tokens=False)[0],\n 'speaker_end':\n non_int_tokenizer.encode(\n SPEAKER_END, add_special_tokens=False)[0],\n 'mention_start': non_int_tokenizer.encode(\n MENTION_START,\n add_special_tokens=False)[0],\n 'cluster_ids': MENTION_ENDS_IDS,\n 'cluster_ids_to_num': END_IDS_TO_NUM,\n 'cluster_new': non_int_tokenizer.encode(\n CLUSTER_NEW,\n add_special_tokens=False)[0],\n 'copy': non_int_tokenizer.encode(\n COPY, add_special_tokens=False)[0],\n 'eos': non_int_tokenizer.eos_token_id\n}" }, { "identifier": "MENTION_END_NON_INT_SPECIAL_IDS", "path": "constants.py", "snippet": "MENTION_END_NON_INT_SPECIAL_IDS = {\n 'speaker_start': mention_end_non_int_tokenizer.encode(\n SPEAKER_START,\n add_special_tokens=False)[0],\n 'speaker_end':\n mention_end_non_int_tokenizer.encode(\n SPEAKER_END, add_special_tokens=False)[0],\n 'mention_start': mention_end_non_int_tokenizer.encode(\n MENTION_START,\n add_special_tokens=False)[0],\n 'mention_end': mention_end_non_int_tokenizer.encode(\n MENTION_END,\n add_special_tokens=False)[0],\n 'cluster_ids': CLUSTER_IDS,\n 'cluster_ids_to_num': CLUSTER_IDS_TO_NUM,\n 'cluster_new': mention_end_non_int_tokenizer.encode(\n CLUSTER_NEW,\n add_special_tokens=False)[0],\n 'copy': mention_end_non_int_tokenizer.encode(\n COPY, add_special_tokens=False)[0],\n 'eos': mention_end_non_int_tokenizer.eos_token_id\n}" }, { "identifier": "ShortSeqProcessor", "path": "logits_processor.py", "snippet": "class ShortSeqProcessor(LogitsProcessor):\n\n def 
__init__(self, orig_inputs, special_ids):\n self.orig_inputs = orig_inputs\n self.sentence_start = special_ids['sentence_start']\n self.sentence_end = special_ids['sentence_end']\n self.mention_start = special_ids['mention_start']\n self.mention_end = special_ids['mention_end']\n self.sep = special_ids['sep']\n self.ent_ids = special_ids['integers'] + [special_ids['mention_end']]\n self.eos_id = special_ids['eos']\n self.sentence_mask = self.get_sentence_mask(orig_inputs)\n\n def get_sentence_mask(self, orig_inputs: torch.Tensor):\n # index from 1 instead of 0\n return (orig_inputs == self.sentence_start).cumsum(-1)\n\n def __call__(self, input_ids: torch.LongTensor,\n scores: torch.FloatTensor) -> torch.FloatTensor:\n is_sent_start = (input_ids == self.sentence_start)\n is_sent_end = (input_ids == self.sentence_end)\n sent_idx = is_sent_start.sum(-1, keepdim=True)\n unclose_sent = (sent_idx.sum(-1) - is_sent_end.sum(-1)) > 0\n close_sent = (~unclose_sent)\n is_sep = (input_ids == self.sep)\n is_end = (input_ids == self.mention_end)\n is_start = (input_ids == self.mention_start)\n is_ent = (is_sep.cumsum(-1) - is_end.cumsum(-1)).bool()\n unclose_ent = (is_ent[:, -1] & unclose_sent)\n unclose_ment = (is_start.sum(-1) - is_sep.sum(-1)) > 0\n close_ent = (~unclose_ent)\n unclose_ment = (close_ent & unclose_ment & unclose_sent)\n masks = torch.ones_like(scores, dtype=torch.bool)\n masks[unclose_sent, self.sentence_end] = False\n masks[close_sent, self.sentence_start] = False\n assert scores.size(0) % self.orig_inputs.size(0) == 0\n num_beams = scores.size(0) // self.orig_inputs.size(0)\n # repeat over beams\n orig_ids = self.orig_inputs.repeat_interleave(num_beams, 0)\n sent_mask = self.sentence_mask.repeat_interleave(num_beams, 0)\n cur_sent_mask = (sent_mask != sent_idx)\n sent_ids = orig_ids.masked_fill(cur_sent_mask, self.sentence_end)\n masks[unclose_sent] = masks[unclose_sent].scatter(1, sent_ids[\n unclose_sent], False)\n masks[unclose_sent, self.sentence_start] = True\n masks[unclose_ent, torch.tensor(self.ent_ids).unsqueeze(1)] = False\n masks[close_ent, self.mention_start] = False\n masks[unclose_ment, self.sep] = False\n is_eos = (close_sent & (sent_idx.sum(-1) == sent_mask[:, -1]))\n masks[is_eos] = True\n masks[is_eos, self.eos_id] = False\n scores.masked_fill_(masks, -float('inf'))\n return scores" }, { "identifier": "IntProcessor", "path": "logits_processor.py", "snippet": "class IntProcessor(LogitsProcessor):\n\n def __init__(self, orig_inputs, special_ids, seq2seq_type):\n \"\"\"\n\n :param orig_inputs: original input_ids\n :param special_ids: dict with keys:[mention_start, mention_end, sep,\n integers]\n \"\"\"\n self.orig_inputs = orig_inputs\n self.seq2seq_type = seq2seq_type\n self.special_ids = special_ids\n self.mention_start = special_ids['mention_start']\n self.mention_end = special_ids['mention_end']\n self.sep = special_ids['sep']\n self.ent_ids = special_ids['integers'] + [special_ids['mention_end']]\n self.specials = [self.mention_start, self.sep] + self.ent_ids\n if self.seq2seq_type == 'action' or self.seq2seq_type == 'tagging' or \\\n self.seq2seq_type == 'input_feed':\n self.copy_id = special_ids['copy']\n self.specials.append(self.copy_id)\n self.eos_id = special_ids['eos']\n\n def __call__(self, input_ids: torch.LongTensor,\n scores: torch.FloatTensor) -> torch.FloatTensor:\n \"\"\"\n\n :param input_ids: BC x l\n :param scores: BC x V\n :return:\n \"\"\"\n # input_ids : B x L\n is_sep = (input_ids == self.sep)\n is_end = (input_ids == self.mention_end)\n 
is_start = (input_ids == self.mention_start)\n is_ent = (is_sep.cumsum(-1) - is_end.cumsum(-1)).bool()\n is_copy = ((~is_start) & (~is_ent) & (~is_end))\n unclose_ent = is_ent[:, -1]\n unclose_ment = (is_start.sum(-1) - is_sep.sum(-1)) > 0\n unclose_ment = ((~unclose_ent) & unclose_ment)\n # -1 for <pad> at begining\n num_copied = is_copy.sum(-1) - 1\n masks = torch.ones_like(scores, dtype=torch.bool)\n close_ent = (~unclose_ent)\n num_copied = num_copied.clamp(max=self.orig_inputs.size(1) - 1)\n # unclosed ent only allows to generate cluster ids or end mention id\n masks[unclose_ent, torch.tensor(self.ent_ids).unsqueeze(1)] = False\n masks[close_ent, self.mention_start] = False\n masks[unclose_ment, self.sep] = False\n # get next copy id\n assert scores.size(0) % self.orig_inputs.size(0) == 0\n num_beams = scores.size(0) // self.orig_inputs.size(0)\n # repeat over beams\n orig_ids = self.orig_inputs.repeat_interleave(num_beams, 0)\n next_ids = orig_ids[torch.arange(scores.size(0)), num_copied]\n if self.seq2seq_type == 'tagging':\n masks[close_ent, self.copy_id] = False\n else:\n if self.seq2seq_type == 'action' or self.seq2seq_type == \\\n 'input_feed':\n scores[close_ent, next_ids[close_ent]] = scores[close_ent,\n self.copy_id]\n masks[close_ent, next_ids[close_ent]] = False\n is_eos = (close_ent & (next_ids == self.eos_id))\n masks[is_eos, torch.tensor(self.specials).unsqueeze(1)] = True\n masks[is_eos, self.eos_id] = False\n scores.masked_fill_(masks, -float('inf'))\n return scores" }, { "identifier": "NonIntProcessor", "path": "logits_processor.py", "snippet": "class NonIntProcessor(LogitsProcessor):\n\n def __init__(self, orig_inputs, special_ids,\n seq2seq_type,\n add_mention_end):\n \"\"\"\n\n :param orig_inputs: original input_ids\n :param special_ids: dict with keys:[mention_start, mention_end, sep,\n integers]\n :param add_mention_end: whether predict mention end before predict\n cluster ids\n \"\"\"\n self.orig_inputs = orig_inputs\n self.special_ids = special_ids\n self.seq2seq_type = seq2seq_type\n self.mention_start = special_ids['mention_start']\n if add_mention_end:\n self.mention_end = special_ids['mention_end']\n else:\n self.mention_end = None\n self.cluster_ids = torch.tensor(special_ids['cluster_ids'],\n dtype=torch.long)\n self.cluster_new = special_ids['cluster_new']\n self.copy_id = special_ids['copy']\n self.eos_id = special_ids['eos']\n self.first_cluster_id = special_ids['cluster_ids'][0]\n self.last_cluster_id = special_ids['cluster_ids'][-1]\n self.add_mention_end = add_mention_end\n\n def __call__(self, input_ids: torch.LongTensor,\n scores: torch.FloatTensor) -> torch.FloatTensor:\n \"\"\"\n\n :param input_ids: BC x l\n :param scores: BC x V\n :return:\n \"\"\"\n # input_ids : B x L\n cluster_ids = self.cluster_ids.to(input_ids.device)\n range_indices = torch.arange(scores.size(0))\n is_not_cid = torch.isin(input_ids, cluster_ids, invert=True)\n is_not_start = (input_ids != self.mention_start)\n if self.add_mention_end:\n is_not_end = (input_ids != self.mention_end)\n unclosed_ent = (input_ids[:, -1] == self.mention_end)\n close_ent = (~unclosed_ent)\n is_copy = (is_not_start & is_not_end & is_not_cid)\n else:\n is_not_end = is_not_cid\n is_copy = (is_not_start & is_not_end)\n unclosed_ment = (is_not_start.sum(-1) - is_not_end.sum(-1)) < 0\n if self.add_mention_end:\n unclosed_ment = (close_ent & unclosed_ment)\n # -1 for <pad> at begining\n num_copied = is_copy.sum(-1) - 1\n masks = torch.ones_like(scores, dtype=torch.bool)\n num_copied = 
num_copied.clamp(max=self.orig_inputs.size(1) - 1)\n # unclosed ent only allows to generate cluster ids or end mention id\n # masks[:, self.specials] = False\n if self.add_mention_end:\n masks[close_ent, self.mention_start] = False\n masks[unclosed_ment, self.mention_end] = False\n else:\n masks[:, self.mention_start] = False\n # notice: make sure </mk> and </mk+1> are next to each other in vocab\n cluster_input_ids = input_ids.masked_fill(\n is_not_cid,\n self.first_cluster_id - 1)\n next_cids = cluster_input_ids.amax(-1) + 1\n if self.add_mention_end:\n has_prev_ends = (unclosed_ent & (next_cids > self.first_cluster_id))\n masks[unclosed_ent, next_cids[unclosed_ent]] = False\n else:\n has_prev_ends = (unclosed_ment & (next_cids >\n self.first_cluster_id))\n masks[unclosed_ment, next_cids[unclosed_ment]] = False\n\n masks[has_prev_ends] = masks[has_prev_ends].scatter(\n 1, cluster_input_ids[has_prev_ends], False)\n masks[has_prev_ends, self.first_cluster_id - 1] = True\n # get next copy id\n assert scores.size(0) % self.orig_inputs.size(0) == 0\n num_beams = scores.size(0) // self.orig_inputs.size(0)\n # repeat over beams\n orig_ids = self.orig_inputs.repeat_interleave(num_beams, 0)\n next_ids = orig_ids[range_indices, num_copied]\n if self.add_mention_end:\n if self.seq2seq_type == 'action' or self.seq2seq_type == \\\n 'input_feed':\n scores[close_ent, next_ids[close_ent]] = scores[close_ent,\n self.copy_id]\n scores[unclosed_ent, next_cids[unclosed_ent]] = scores[\n unclosed_ent, self.cluster_new]\n masks[close_ent, next_ids[close_ent]] = False\n else:\n if self.seq2seq_type == 'action' or self.seq2seq_type == \\\n 'input_feed':\n scores[range_indices, next_ids] = scores[:, self.copy_id]\n scores[unclosed_ment, next_cids[unclosed_ment]] = scores[\n unclosed_ment,\n self.cluster_new]\n masks[range_indices, next_ids] = False\n is_eos = (next_ids == self.eos_id)\n masks[is_eos] = True\n masks[is_eos, self.eos_id] = False\n scores.masked_fill_(masks, -float('inf'))\n return scores" } ]
import time
import torch.distributed as dist
import sys
import numpy as np
import os
import json
import re
import torch.nn as nn
import torch
import shutil
import math
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
import torch_xla.distributed.parallel_loader as pl
import smdistributed.modelparallel.torch as smp
import safetensors.torch
from tqdm.auto import tqdm
from transformers.trainer_utils import HPSearchBackend, speed_metrics, \
    TrainOutput
from pathlib import Path
from torch.utils.data import RandomSampler
from torch.utils.data.distributed import DistributedSampler
from transformers.trainer_callback import TrainerState
from transformers.trainer import TRAINER_STATE_NAME, OptimizerNames
from transformers.utils import is_apex_available
from transformers.integrations import hp_params
from transformers import Seq2SeqTrainer
from packaging import version
from collections import defaultdict
from metrics import CorefAllMetrics
from typing import Dict, Union, Any, Optional, Tuple, List
from transformers.debug_utils import DebugOption, DebugUnderflowOverflow
from transformers.pytorch_utils import is_torch_less_than_1_11
from torch.utils.data import DataLoader
from transformers.trainer_utils import EvalLoopOutput, has_length, \
    denumpify_detensorize, ShardedDDPOption
from data import get_document_predicts, parse_int_output_tokens, \
    parse_short_target_tokens, parse_nonint_output_tokens
from constants import SPECIAL_IDS, MARK_SPECIAL_IDS, NON_INT_SPECIAL_IDS, \
    MENTION_END_NON_INT_SPECIAL_IDS
from transformers.deepspeed import deepspeed_init
from transformers.trainer_pt_utils import find_batch_size, nested_concat, \
    nested_numpify, IterableDatasetShard, nested_truncate, get_parameter_names
from transformers.modeling_utils import PreTrainedModel, unwrap_model, \
    load_sharded_checkpoint
from transformers.utils import logging, is_torch_tpu_available, \
    is_sagemaker_mp_enabled, is_safetensors_available, SAFE_WEIGHTS_NAME, \
    WEIGHTS_NAME, WEIGHTS_INDEX_NAME
from transformers.integrations import is_fairscale_available
from transformers.dependency_versions_check import dep_version_check
from smdistributed.modelparallel import __version__ as SMP_VERSION
from apex import amp
from transformers import LogitsProcessorList
from logits_processor import ShortSeqProcessor, IntProcessor, NonIntProcessor
from transformers.trainer_seq2seq import is_deepspeed_zero3_enabled
10,954
self.log(metrics) run_dir = self._get_output_dir(trial) checkpoints_sorted = self._sorted_checkpoints(use_mtime=False, output_dir=run_dir) # Delete the last checkpoint when save_total_limit=1 if it's different from the best checkpoint. if self.state.best_model_checkpoint is not None and \ self.args.save_total_limit == 1 and self.is_world_process_zero(): for checkpoint in checkpoints_sorted: if checkpoint != self.state.best_model_checkpoint: logger.info( f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit") shutil.rmtree(checkpoint) self.control = self.callback_handler.on_train_end(args, self.state, self.control) return TrainOutput(self.state.global_step, train_loss, metrics) def my_compute_metrics(self, doc_labels: Dict[str, List[List]], predicts: Any, samples: List, split: str, id_to_name: Dict = None ) -> Dict: if self.args.joint_train: data_names = self.args.joint_data_names.split(',') joint_threds = [ int(t) for t in self.args.joint_min_num_mentions.split(',')] name_to_threds = {n: t for n, t in zip(data_names, joint_threds)} documents_to_chunk_data = defaultdict(list) documents_to_chunk_gold = defaultdict(list) predictions = {} golds = {} assert len(samples) == len(predicts) out_sents = [] last_doc_id = re.sub(r'_\d+$', '', samples[0]['doc_key']) for sample, predict in zip(samples, predicts): doc_key = sample['doc_key'] doc_id = re.sub(r'_\d+$', '', doc_key) # require convert to ids first input_ids = sample['sentence'] subtoken_map = sample['subtoken_map'] offset = sample['offset'] # remove bos predict_ids = predict[1:].tolist() gold_data = sample['seg_clusters'] if self.args.joint_train: thred = name_to_threds[id_to_name[doc_id]] else: thred = self.args.min_num_mentions if self.args.seq2seq_type == "short_seq": special_ids = MARK_SPECIAL_IDS if self.args.mark_sentence \ else SPECIAL_IDS pred_data, aligned_input_ids, aligned_pred_ids = \ parse_short_target_tokens(input_ids, predict_ids, special_ids, subtoken_map, self.tokenizer, self.args.align_mode, thred, self.args.mark_sentence ) pred_tokens = self.tokenizer.convert_ids_to_tokens( predict_ids) out_predict = { 'doc_key': doc_key, 'pred_tokens': pred_tokens, 'pred_text': self.tokenizer.convert_tokens_to_string( pred_tokens), 'pred_aligned_text': self.tokenizer.convert_ids_to_tokens( aligned_pred_ids ), 'input_aligned_text': self.tokenizer.convert_ids_to_tokens( aligned_input_ids ) } else: is_tagging = (self.args.seq2seq_type == 'tagging') if self.args.action_type == 'integer': pred_data, pred_token_mentions, predict_ids = \ parse_int_output_tokens( input_ids, predict_ids, SPECIAL_IDS, subtoken_map, self.tokenizer, thred, is_tagging) else: special_ids = MENTION_END_NON_INT_SPECIAL_IDS if \ self.args.add_mention_end else NON_INT_SPECIAL_IDS pred_data, pred_token_mentions, predict_ids = \ parse_nonint_output_tokens( input_ids, predict_ids, special_ids, subtoken_map, self.tokenizer, self.args.add_mention_end, thred) pred_token_mentions = [(m[0] + offset, m[1] + offset) for m in pred_token_mentions] pred_tokens = self.tokenizer.convert_ids_to_tokens( predict_ids) out_predict = {'doc_key': doc_key, 'pred_tokens': pred_tokens, 'pred_text': self.tokenizer.convert_tokens_to_string( pred_tokens), 'predict_clusters': pred_data, 'gold_clusters': gold_data, 'predict_token_mentions': pred_token_mentions } # list of (m1,m2) documents_to_chunk_data[doc_id].extend(pred_data) documents_to_chunk_gold[doc_id].extend(gold_data) out_sents.append(out_predict) if doc_id != last_doc_id:
if is_torch_tpu_available(check_device=False): if is_fairscale_available(): dep_version_check("fairscale") if is_sagemaker_mp_enabled(): IS_SAGEMAKER_MP_POST_1_10 = version.parse(SMP_VERSION) >= version.parse( "1.10") else: IS_SAGEMAKER_MP_POST_1_10 = False if is_safetensors_available(): if is_apex_available(): logger = logging.get_logger(__name__) TRAINING_ARGS_NAME = "training_args.bin" TRAINER_STATE_NAME = "trainer_state.json" OPTIMIZER_NAME = "optimizer.pt" SCHEDULER_NAME = "scheduler.pt" SCALER_NAME = "scaler.pt" class CorefTrainer(Seq2SeqTrainer): def _rotate_checkpoints(self, use_mtime=False, output_dir=None) -> None: if self.args.save_total_limit is None or self.args.save_total_limit <= 0: return # Check if we should delete older checkpoint(s) checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime, output_dir=output_dir) if self.args.val_after_train and self.args.eval_delay < \ self.state.global_step: for checkpoint in checkpoints_sorted[:-1]: states_dir = [str(x) for x in Path( checkpoint).glob(f'global_step*') if os.path.isdir(x)] for state_dir in states_dir: logger.info(f"Deleting optimizer states of saved " f"checkpoint {checkpoint}") if os.path.exists(state_dir) and os.path.isdir( state_dir): shutil.rmtree(state_dir) else: if len(checkpoints_sorted) <= self.args.save_total_limit: return # If save_total_limit=1 with load_best_model_at_end=True, we could end up deleting the last checkpoint, which # we don't do to allow resuming. save_total_limit = self.args.save_total_limit if ( self.state.best_model_checkpoint is not None and self.args.save_total_limit == 1 and checkpoints_sorted[ -1] != self.state.best_model_checkpoint ): save_total_limit = 2 number_of_checkpoints_to_delete = max(0, len( checkpoints_sorted) - save_total_limit) checkpoints_to_be_deleted = checkpoints_sorted[ :number_of_checkpoints_to_delete] for checkpoint in checkpoints_to_be_deleted: logger.info( f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit") shutil.rmtree(checkpoint) def _save(self, output_dir: Optional[str] = None, state_dict=None): # If we are executing this function, we are the process zero, so we don't check for that. output_dir = output_dir if output_dir is not None else self.args.output_dir os.makedirs(output_dir, exist_ok=True) logger.info(f"Saving model checkpoint to {output_dir}") # Save a trained model and configuration using `save_pretrained()`. 
# They can then be reloaded using `from_pretrained()` if not isinstance(self.model, PreTrainedModel) and not hasattr( self.model, 'save_pretrained'): if state_dict is None: state_dict = self.model.state_dict() if isinstance(unwrap_model(self.model), PreTrainedModel): unwrap_model(self.model).save_pretrained( output_dir, state_dict=state_dict, # safe_serialization=self.args.save_safetensors ) else: logger.info( "Trainer.model is not a `PreTrainedModel`, only saving its state dict.") # if self.args.save_safetensors: # safetensors.torch.save_file(state_dict, # os.path.join(output_dir, # SAFE_WEIGHTS_NAME)) # else: torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME)) else: self.model.save_pretrained( output_dir, state_dict=state_dict, # safe_serialization=self.args.save_safetensors ) if self.tokenizer is not None: self.tokenizer.save_pretrained(output_dir) # Good practice: save your training arguments together with the trained model torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME)) def _inner_training_loop( self, batch_size=None, args=None, resume_from_checkpoint=None, trial=None, ignore_keys_for_eval=None ): self._train_batch_size = batch_size # Data loader and number of training steps train_dataloader = self.get_train_dataloader() # Setting up training control variables: # number of training epochs: num_train_epochs # number of training steps per epoch: num_update_steps_per_epoch # total number of training steps to execute: max_steps total_train_batch_size = args.train_batch_size * args.gradient_accumulation_steps * args.world_size len_dataloader = None if has_length(train_dataloader): len_dataloader = len(train_dataloader) num_update_steps_per_epoch = len_dataloader // args.gradient_accumulation_steps num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1) num_examples = self.num_examples(train_dataloader) if args.max_steps > 0: max_steps = args.max_steps num_train_epochs = args.max_steps // num_update_steps_per_epoch + int( args.max_steps % num_update_steps_per_epoch > 0 ) # May be slightly incorrect if the last batch in the training dataloader has a smaller size but it's # the best we can do. num_train_samples = args.max_steps * total_train_batch_size else: max_steps = math.ceil( args.num_train_epochs * num_update_steps_per_epoch) num_train_epochs = math.ceil(args.num_train_epochs) num_train_samples = self.num_examples( train_dataloader) * args.num_train_epochs elif args.max_steps > 0: # Rely on max_steps when dataloader does not have a working size max_steps = args.max_steps # Setting a very large number of epochs so we go as many times as necessary over the iterator. num_train_epochs = sys.maxsize num_update_steps_per_epoch = max_steps num_examples = total_train_batch_size * args.max_steps num_train_samples = args.max_steps * total_train_batch_size else: raise ValueError( "args.max_steps must be set to a positive value if dataloader does not have a length, was" f" {args.max_steps}" ) if DebugOption.UNDERFLOW_OVERFLOW in self.args.debug: if self.args.n_gpu > 1: # nn.DataParallel(model) replicates the model, creating new variables and module # references registered here no longer work on other gpus, breaking the module raise ValueError( "Currently --debug underflow_overflow is not supported under DP. Please use DDP" " (torch.distributed.launch)." 
) else: debug_overflow = DebugUnderflowOverflow(self.model) # noqa delay_optimizer_creation = ( self.sharded_ddp is not None and self.sharded_ddp != ShardedDDPOption.SIMPLE or is_sagemaker_mp_enabled() or self.fsdp is not None ) if args.deepspeed: deepspeed_engine, optimizer, lr_scheduler = deepspeed_init( self, num_training_steps=max_steps, resume_from_checkpoint=resume_from_checkpoint ) self.model = deepspeed_engine.module self.model_wrapped = deepspeed_engine self.deepspeed = deepspeed_engine self.optimizer = optimizer self.lr_scheduler = lr_scheduler elif not delay_optimizer_creation: self.create_optimizer_and_scheduler(num_training_steps=max_steps) self.state = TrainerState() self.state.is_hyper_param_search = trial is not None # Activate gradient checkpointing if needed if args.gradient_checkpointing: self.model.gradient_checkpointing_enable() model = self._wrap_model(self.model_wrapped) if is_sagemaker_mp_enabled() and resume_from_checkpoint is not None: self._load_from_checkpoint(resume_from_checkpoint, model) # for the rest of this function `model` is the outside model, whether it was wrapped or not if model is not self.model: self.model_wrapped = model if delay_optimizer_creation: self.create_optimizer_and_scheduler(num_training_steps=max_steps) # Check if saved optimizer or scheduler states exist self._load_optimizer_and_scheduler(resume_from_checkpoint) # important: at this point: # self.model is the Transformers Model # self.model_wrapped is DDP(Transformers Model), Deepspeed(Transformers Model), etc. # Train! logger.info("***** Running training *****") logger.info(f" Num examples = {num_examples}") logger.info(f" Num Epochs = {num_train_epochs}") logger.info( f" Instantaneous batch size per device = {args.per_device_train_batch_size}") logger.info( f" Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size}") logger.info( f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {max_steps}") logger.info( f" Number of trainable parameters = {sum(p.numel() for p in model.parameters() if p.requires_grad)}" ) self.state.epoch = 0 start_time = time.time() epochs_trained = 0 steps_trained_in_current_epoch = 0 steps_trained_progress_bar = None # Check if continuing training from a checkpoint if resume_from_checkpoint is not None and os.path.isfile( os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME) ): self.state = TrainerState.load_from_json( os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME)) epochs_trained = self.state.global_step // num_update_steps_per_epoch if not args.ignore_data_skip: steps_trained_in_current_epoch = self.state.global_step % ( num_update_steps_per_epoch) steps_trained_in_current_epoch *= args.gradient_accumulation_steps else: steps_trained_in_current_epoch = 0 logger.info( " Continuing training from checkpoint, will skip to saved global_step") logger.info(f" Continuing training from epoch {epochs_trained}") logger.info( f" Continuing training from global step {self.state.global_step}") if not args.ignore_data_skip: logger.info( f" Will skip the first {epochs_trained} epochs then the first {steps_trained_in_current_epoch} " "batches in the first epoch. If this takes a lot of time, you can add the `--ignore_data_skip` " "flag to your launch command, but you will resume the training on data already seen by your model." 
) if self.is_local_process_zero() and not args.disable_tqdm: steps_trained_progress_bar = tqdm( total=steps_trained_in_current_epoch) steps_trained_progress_bar.set_description( "Skipping the first batches") # Update the references self.callback_handler.model = self.model self.callback_handler.optimizer = self.optimizer self.callback_handler.lr_scheduler = self.lr_scheduler self.callback_handler.train_dataloader = train_dataloader if self.hp_name is not None and self._trial is not None: # use self._trial because the SigOpt/Optuna hpo only call `_hp_search_setup(trial)` instead of passing trial # parameter to Train when using DDP. self.state.trial_name = self.hp_name(self._trial) if trial is not None: assignments = trial.assignments if self.hp_search_backend == HPSearchBackend.SIGOPT else trial self.state.trial_params = hp_params(assignments) else: self.state.trial_params = None # This should be the same if the state has been saved but in case the training arguments changed, it's safer # to set this after the load. self.state.max_steps = max_steps self.state.num_train_epochs = num_train_epochs self.state.is_local_process_zero = self.is_local_process_zero() self.state.is_world_process_zero = self.is_world_process_zero() # tr_loss is a tensor to avoid synchronization of TPUs through .item() tr_loss = torch.tensor(0.0).to(args.device) # _total_loss_scalar is updated everytime .item() has to be called on tr_loss and stores the sum of all losses self._total_loss_scalar = 0.0 self._globalstep_last_logged = self.state.global_step model.zero_grad() self.control = self.callback_handler.on_train_begin(args, self.state, self.control) # Skip the first epochs_trained epochs to get the random state of the dataloader at the right point. if not args.ignore_data_skip: for epoch in range(epochs_trained): is_random_sampler = hasattr(train_dataloader, "sampler") and isinstance( train_dataloader.sampler, RandomSampler ) if is_torch_less_than_1_11 or not is_random_sampler: # We just need to begin an iteration to create the randomization of the sampler. # That was before PyTorch 1.11 however... if self.args.joint_train: train_dataloader.dataset.set_samples(epoch) for _ in train_dataloader: break else: # Otherwise we need to call the whooooole sampler cause there is some random operation added # AT THE VERY END! _ = list(train_dataloader.sampler) if args.manual_empty_cache: torch.cuda.empty_cache() for epoch in range(epochs_trained, num_train_epochs): if self.args.joint_train: train_dataloader.dataset.set_samples(epoch) if isinstance(train_dataloader, DataLoader) and isinstance( train_dataloader.sampler, DistributedSampler): train_dataloader.sampler.set_epoch(epoch) elif hasattr(train_dataloader, "dataset") and isinstance( train_dataloader.dataset, IterableDatasetShard): train_dataloader.dataset.set_epoch(epoch) if is_torch_tpu_available(): parallel_loader = pl.ParallelLoader(train_dataloader, [ args.device]).per_device_loader(args.device) epoch_iterator = parallel_loader else: epoch_iterator = train_dataloader # Reset the past mems state at the beginning of each epoch if necessary. 
if args.past_index >= 0: self._past = None steps_in_epoch = ( len(epoch_iterator) if len_dataloader is not None else args.max_steps * args.gradient_accumulation_steps ) self.control = self.callback_handler.on_epoch_begin(args, self.state, self.control) if epoch == epochs_trained and resume_from_checkpoint is not None and steps_trained_in_current_epoch == 0: self._load_rng_state(resume_from_checkpoint) step = -1 if args.manual_empty_cache: torch.cuda.empty_cache() for step, inputs in enumerate(epoch_iterator): # Skip past any already trained steps if resuming training if args.manual_empty_cache: torch.cuda.empty_cache() if steps_trained_in_current_epoch > 0: steps_trained_in_current_epoch -= 1 if steps_trained_progress_bar is not None: steps_trained_progress_bar.update(1) if steps_trained_in_current_epoch == 0: self._load_rng_state(resume_from_checkpoint) continue elif steps_trained_progress_bar is not None: steps_trained_progress_bar.close() steps_trained_progress_bar = None if step % args.gradient_accumulation_steps == 0: self.control = self.callback_handler.on_step_begin(args, self.state, self.control) # if args.manual_empty_cache: # torch.cuda.empty_cache() if ( ((step + 1) % args.gradient_accumulation_steps != 0) and args.local_rank != -1 and args._no_sync_in_gradient_accumulation ): # Avoid unnecessary DDP synchronization since there will be no backward pass on this example. with model.no_sync(): tr_loss_step = self.training_step(model, inputs) else: tr_loss_step = self.training_step(model, inputs) if ( args.logging_nan_inf_filter and not is_torch_tpu_available() and ( torch.isnan(tr_loss_step) or torch.isinf(tr_loss_step)) ): # if loss is nan or inf simply add the average of previous logged losses tr_loss += tr_loss / ( 1 + self.state.global_step - self._globalstep_last_logged) else: tr_loss += tr_loss_step self.current_flos += float(self.floating_point_ops(inputs)) # Optimizer step for deepspeed must be called on every step regardless of the value of gradient_accumulation_steps if self.deepspeed: if args.manual_empty_cache: torch.cuda.empty_cache() self.deepspeed.step() if (step + 1) % args.gradient_accumulation_steps == 0 or ( # last step in epoch but step is always smaller than gradient_accumulation_steps steps_in_epoch <= args.gradient_accumulation_steps and (step + 1) == steps_in_epoch ): # Gradient clipping if args.max_grad_norm is not None and args.max_grad_norm > 0 and not self.deepspeed: # deepspeed does its own clipping if self.do_grad_scaling: # Reduce gradients first for XLA if is_torch_tpu_available(): gradients = xm._fetch_gradients(self.optimizer) xm.all_reduce("sum", gradients, scale=1.0 / xm.xrt_world_size()) # AMP: gradients need unscaling self.scaler.unscale_(self.optimizer) if is_sagemaker_mp_enabled() and args.fp16: self.optimizer.clip_master_grads(args.max_grad_norm) elif hasattr(self.optimizer, "clip_grad_norm"): # Some optimizers (like the sharded optimizer) have a specific way to do gradient clipping self.optimizer.clip_grad_norm(args.max_grad_norm) elif hasattr(model, "clip_grad_norm_"): # Some models (like FullyShardedDDP) have a specific way to do gradient clipping model.clip_grad_norm_(args.max_grad_norm) else: # Revert to normal clipping otherwise, handling Apex or full precision nn.utils.clip_grad_norm_( amp.master_params( self.optimizer) if self.use_apex else model.parameters(), args.max_grad_norm, ) # Optimizer step optimizer_was_run = True if self.deepspeed: pass # called outside the loop elif is_torch_tpu_available(): if self.do_grad_scaling: 
self.scaler.step(self.optimizer) self.scaler.update() else: xm.optimizer_step(self.optimizer) elif self.do_grad_scaling: scale_before = self.scaler.get_scale() self.scaler.step(self.optimizer) self.scaler.update() scale_after = self.scaler.get_scale() optimizer_was_run = scale_before <= scale_after else: self.optimizer.step() if optimizer_was_run and not self.deepspeed: self.lr_scheduler.step() model.zero_grad() self.state.global_step += 1 self.state.epoch = epoch + (step + 1) / steps_in_epoch if args.manual_empty_cache: torch.cuda.empty_cache() self.control = self.callback_handler.on_step_end(args, self.state, self.control) self._maybe_log_save_evaluate(tr_loss, model, trial, epoch, ignore_keys_for_eval) else: self.control = self.callback_handler.on_substep_end(args, self.state, self.control) if self.control.should_epoch_stop or self.control.should_training_stop: break if step < 0: logger.warning( "There seems to be not a single sample in your epoch_iterator, stopping training at step" f" {self.state.global_step}! This is expected if you're using an IterableDataset and set" f" num_steps ({max_steps}) higher than the number of available samples." ) self.control.should_training_stop = True self.control = self.callback_handler.on_epoch_end(args, self.state, self.control) self._maybe_log_save_evaluate(tr_loss, model, trial, epoch, ignore_keys_for_eval) if DebugOption.TPU_METRICS_DEBUG in self.args.debug: if is_torch_tpu_available(): # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report()) else: logger.warning( "You enabled PyTorch/XLA debug metrics but you don't have a TPU " "configured. Check your training configuration if this is unexpected." ) if self.control.should_training_stop: break if args.past_index and hasattr(self, "_past"): # Clean the state at the end of training delattr(self, "_past") logger.info( "\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n") if args.load_best_model_at_end and self.state.best_model_checkpoint is not None: # Wait for everyone to get here so we are sur the model has been saved by process 0. if is_torch_tpu_available(): xm.rendezvous("load_best_model_at_end") elif args.local_rank != -1: dist.barrier() elif is_sagemaker_mp_enabled(): smp.barrier() self._load_best_model() # add remaining tr_loss self._total_loss_scalar += tr_loss.item() train_loss = self._total_loss_scalar / self.state.global_step metrics = speed_metrics("train", start_time, num_samples=num_train_samples, num_steps=self.state.max_steps) self.store_flos() metrics["total_flos"] = self.state.total_flos metrics["train_loss"] = train_loss self.is_in_train = False self._memory_tracker.stop_and_update_metrics(metrics) self.log(metrics) run_dir = self._get_output_dir(trial) checkpoints_sorted = self._sorted_checkpoints(use_mtime=False, output_dir=run_dir) # Delete the last checkpoint when save_total_limit=1 if it's different from the best checkpoint. 
if self.state.best_model_checkpoint is not None and \ self.args.save_total_limit == 1 and self.is_world_process_zero(): for checkpoint in checkpoints_sorted: if checkpoint != self.state.best_model_checkpoint: logger.info( f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit") shutil.rmtree(checkpoint) self.control = self.callback_handler.on_train_end(args, self.state, self.control) return TrainOutput(self.state.global_step, train_loss, metrics) def my_compute_metrics(self, doc_labels: Dict[str, List[List]], predicts: Any, samples: List, split: str, id_to_name: Dict = None ) -> Dict: if self.args.joint_train: data_names = self.args.joint_data_names.split(',') joint_threds = [ int(t) for t in self.args.joint_min_num_mentions.split(',')] name_to_threds = {n: t for n, t in zip(data_names, joint_threds)} documents_to_chunk_data = defaultdict(list) documents_to_chunk_gold = defaultdict(list) predictions = {} golds = {} assert len(samples) == len(predicts) out_sents = [] last_doc_id = re.sub(r'_\d+$', '', samples[0]['doc_key']) for sample, predict in zip(samples, predicts): doc_key = sample['doc_key'] doc_id = re.sub(r'_\d+$', '', doc_key) # require convert to ids first input_ids = sample['sentence'] subtoken_map = sample['subtoken_map'] offset = sample['offset'] # remove bos predict_ids = predict[1:].tolist() gold_data = sample['seg_clusters'] if self.args.joint_train: thred = name_to_threds[id_to_name[doc_id]] else: thred = self.args.min_num_mentions if self.args.seq2seq_type == "short_seq": special_ids = MARK_SPECIAL_IDS if self.args.mark_sentence \ else SPECIAL_IDS pred_data, aligned_input_ids, aligned_pred_ids = \ parse_short_target_tokens(input_ids, predict_ids, special_ids, subtoken_map, self.tokenizer, self.args.align_mode, thred, self.args.mark_sentence ) pred_tokens = self.tokenizer.convert_ids_to_tokens( predict_ids) out_predict = { 'doc_key': doc_key, 'pred_tokens': pred_tokens, 'pred_text': self.tokenizer.convert_tokens_to_string( pred_tokens), 'pred_aligned_text': self.tokenizer.convert_ids_to_tokens( aligned_pred_ids ), 'input_aligned_text': self.tokenizer.convert_ids_to_tokens( aligned_input_ids ) } else: is_tagging = (self.args.seq2seq_type == 'tagging') if self.args.action_type == 'integer': pred_data, pred_token_mentions, predict_ids = \ parse_int_output_tokens( input_ids, predict_ids, SPECIAL_IDS, subtoken_map, self.tokenizer, thred, is_tagging) else: special_ids = MENTION_END_NON_INT_SPECIAL_IDS if \ self.args.add_mention_end else NON_INT_SPECIAL_IDS pred_data, pred_token_mentions, predict_ids = \ parse_nonint_output_tokens( input_ids, predict_ids, special_ids, subtoken_map, self.tokenizer, self.args.add_mention_end, thred) pred_token_mentions = [(m[0] + offset, m[1] + offset) for m in pred_token_mentions] pred_tokens = self.tokenizer.convert_ids_to_tokens( predict_ids) out_predict = {'doc_key': doc_key, 'pred_tokens': pred_tokens, 'pred_text': self.tokenizer.convert_tokens_to_string( pred_tokens), 'predict_clusters': pred_data, 'gold_clusters': gold_data, 'predict_token_mentions': pred_token_mentions } # list of (m1,m2) documents_to_chunk_data[doc_id].extend(pred_data) documents_to_chunk_gold[doc_id].extend(gold_data) out_sents.append(out_predict) if doc_id != last_doc_id:
next_line: predictions[last_doc_id] = get_document_predicts(
gold_snippet_index: 1
created_at: 2023-10-17 17:39:16+00:00
level: 16k
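The training-loop snippet in the record above detects skipped optimizer steps by comparing the GradScaler's scale before and after update(): when inf/NaN gradients force the scale down, the step was not applied and the LR scheduler should not advance. The following is a minimal sketch of that AMP idiom, not the repository's code; the toy model, optimizer, and data are placeholders, and a CUDA device is assumed.

import torch

# Toy setup; any model/optimizer/scheduler follows the same pattern.
model = torch.nn.Linear(4, 2).cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
scaler = torch.cuda.amp.GradScaler()

inputs = torch.randn(8, 4, device="cuda")
targets = torch.randn(8, 2, device="cuda")

with torch.cuda.amp.autocast():
    loss = torch.nn.functional.mse_loss(model(inputs), targets)

scaler.scale(loss).backward()
scaler.unscale_(optimizer)  # unscale first so clipping sees true gradient magnitudes
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)

scale_before = scaler.get_scale()
scaler.step(optimizer)      # silently skipped if inf/NaN gradients were found
scaler.update()
optimizer_was_run = scale_before <= scaler.get_scale()

if optimizer_was_run:
    scheduler.step()        # keep the LR schedule aligned with real optimizer steps
optimizer.zero_grad()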
repo_name: chenxn2020/GOSE
file_path: GOSEfinetune/models/LiLTRobertaLike/modeling_LiLTRobertaLike.py
[ { "identifier": "LiLTRobertaLikeConfig", "path": "GOSEfinetune/models/LiLTRobertaLike/configuration_LiLTRobertaLike.py", "snippet": "class LiLTRobertaLikeConfig(RobertaConfig):\n model_type = \"liltrobertalike\"\n\n def __init__(\n self,\n channel_shrink_ratio=4,\n max_2d_position_embeddings=1024,\n **kwargs\n ):\n super().__init__(\n **kwargs,\n )\n self.channel_shrink_ratio = channel_shrink_ratio\n self.max_2d_position_embeddings = max_2d_position_embeddings" }, { "identifier": "RE", "path": "GOSEfinetune/modules/decoders/RE.py", "snippet": "class RE(nn.Module):\n def __init__(self,args):\n super().__init__()\n self.cnt=0\n self.rounds = 5\n self.hidden_size = 960\n self.dim = self.hidden_size // 2\n self.hidden_dropout_prob = 0.5\n self.max_key = 64\n self.max_value = 64\n self.pooling_mode = 'max'\n self.softmax = nn.Softmax(dim=-1)\n self.loss_fct = CrossEntropyLoss()\n self.dropout = nn.Dropout(self.hidden_dropout_prob)\n\n self.output = nn.Linear(self.dim,2)\n self.k_up = nn.Linear(2,self.dim)\n self.v_up = nn.Linear(2,self.dim)\n\n self.type_token = nn.Parameter(torch.normal(0,0.0002,size=(1,self.hidden_size)))\n self.biaffine_type = BiaffineAttention(self.dim , 3)\n self.biaffine = BiaffineAttention(self.dim , 2)\n self.ffn = nn.Linear(2,self.dim)\n self.ffn_type = nn.Linear(3,self.dim)\n self.attn_type = Attention_logits(self.dim,max_len=self.max_key)\n self.attn = Attention_logits(self.dim,max_len=self.max_key)\n\n self.key_type_ffn = nn.Linear(self.hidden_size,self.dim )\n self.value_type_ffn = nn.Linear(self.hidden_size,self.dim )\n self.key_multi_ffn = nn.Linear(self.hidden_size,self.dim )\n self.value_multi_ffn = nn.Linear(self.hidden_size,self.dim )\n self.key_single_ffn = nn.Linear(self.hidden_size,self.dim )\n self.value_single_ffn = nn.Linear(self.hidden_size,self.dim )\n\n self.classifier = nn.Linear(self.dim * 2,2)\n \"\"\"\n self.text_biaffine = BiaffineAttention(self.hidden_size//2 , 2)\n \"\"\"\n def devide_entities(self,entities):\n \"\"\"\n devide entities into keys and values according there entities label\n return entities index\n \"\"\"\n entities_label_list = entities['label']\n key_index = [index for index,label in enumerate(entities_label_list) if label == 1]\n value_index = [index for index,label in enumerate(entities_label_list) if label == 2]\n\n key_num = len(key_index)\n value_num = len(value_index)\n\n M = self.max_key\n N = self.max_value \n\n if not key_num * value_num :\n key_index = [0]\n value_index = [1]\n \n if key_num > M :\n key_index = key_index[:M]\n if value_num > N:\n value_index = value_index[:N]\n\n return key_index, value_index \n\n def padding(self,data,N):\n # padding data 2,n,768 -> 2,N,768\n n = data.shape[0] \n dim = data.shape[1]\n device = data.device\n data = F.pad(data,(0,0,0,N-n))\n mask = torch.tensor([1.0]*n + [0.0]*(N-n),device=device)\n return data,mask \n\n def type_classifier(self,key,value,key_mask,value_mask):\n key = self.key_type_ffn(key)\n value = self.value_type_ffn(value)\n \n M = self.max_key\n N = self.max_value + 1\n logits_mask = key_mask.unsqueeze(2).repeat(1,1,N) * \\\n value_mask.unsqueeze(1).repeat(1,M,1) \n for i in range(self.rounds):\n \n logits = self.biaffine_type(key.unsqueeze(2).repeat(1,1,N,1),\n value.unsqueeze(1).repeat(1,M,1,1))\n if i < self.rounds-1:\n # B K V H\n logits = self.ffn_type(logits)\n logits = self.attn_type(logits,logits_mask)\n det_key,det_value = self.pooling(logits,key_mask,value_mask)\n key += det_key\n value += det_value \n else: \n logits = logits * 
logits_mask.unsqueeze(3).repeat(1,1,1,3)\n return logits \n \n def multi_classifier(self,key,value,key_mask,value_mask):\n key = self.key_multi_ffn(key)\n value = self.value_multi_ffn(value)\n\n M = key.shape[1]\n N = value.shape[1]\n \n key = key.unsqueeze(2).repeat(1,1,N,1)\n value = value.unsqueeze(1).repeat(1,M,1,1)\n\n multi_logits = self.classifier(torch.cat([key,value],dim=-1))\n\n return multi_logits \n \n def single_classifier(self,key,value,key_mask,value_mask):\n key = self.key_single_ffn(key)\n value = self.value_single_ffn(value)\n \n M = key.shape[1]\n N = value.shape[1]\n \n logits_mask = key_mask.unsqueeze(2).repeat(1,1,N) * \\\n value_mask.unsqueeze(1).repeat(1,M,1) \n \n for i in range(self.rounds):\n logits = self.biaffine(key.unsqueeze(2).repeat(1,1,N,1),\n value.unsqueeze(1).repeat(1,M,1,1))\n if i < self.rounds-1:\n # B K V H\n logits = self.ffn(logits)\n logits = self.attn(logits,logits_mask)\n det_key,det_value = self.pooling(logits,key_mask,value_mask)\n key += det_key\n value += det_value \n else: \n logits = logits * logits_mask.unsqueeze(3).repeat(1,1,1,2) \n\n return logits \n \n def forward(self, hidden_state, entities, relations, bbox):\n self.cnt+=1\n #layout_emb,text_emb = hidden_state \n B, max_len, H = hidden_state.shape\n device = hidden_state.device\n M = self.max_key\n N = self.max_value\n loss = 0\n all_pred_relations = []\n\n batch = []\n for b in range(B):\n if len(entities[b]['start']) <= 2:\n entities[b] = {\"end\":[1,1],\"label\":[0,0],\"start\":[0,0]}\n \n key_index,value_index = self.devide_entities(entities[b])\n start_token_index = torch.tensor(entities[b]['start'])\n key_start_token = start_token_index[key_index]\n value_start_token = start_token_index[value_index]\n #b,2,len,dim\n key = hidden_state[b][key_start_token,:]\n value = hidden_state[b][value_start_token,:]\n\n key,key_mask = self.padding(key,self.max_key)\n value = torch.cat([self.type_token,value],dim=0)\n value,value_mask = self.padding(value,self.max_value+1)\n\n batch.append((key,value,key_mask,value_mask))\n \n \n org_key = torch.stack([item[0] for item in batch],dim=0)\n org_value = torch.stack([item[1] for item in batch],dim=0)\n key_mask = torch.stack([item[2] for item in batch],dim=0)\n value_mask = torch.stack([item[3] for item in batch],dim=0)\n\n type_logits = self.type_classifier(org_key,org_value,key_mask,value_mask)\n \"\"\"\n self.type_token 0 - no link \n 1 - single link \n 2 - multilink\n B M N+1 3/\n \"\"\"\n \n org_value = org_value[:,1:,:]\n value_mask = value_mask[:,1:]\n\n type_token = self.softmax(type_logits[:,:,0])\n key_type = type_token.argmax(dim=-1)\n #so far we can get key label to route for downstream processing\n type_drop = key_type == 0\n type_single = key_type == 1\n type_multi = key_type == 2\n\n #multi_key = org_key[type_multi]\n multi_logits = self.multi_classifier(org_key,org_value,key_mask,value_mask)\n\n key_mask = key_mask.bool() & type_single\n single_logits = self.single_classifier(org_key,org_value,key_mask,value_mask)\n\n type_loss = self.get_type_loss(type_logits,key_mask,entities,relations)\n multi_loss = self.get_multi_loss(multi_logits,entities,relations)\n single_loss = self.get_single_loss(single_logits,entities,relations)\n\n loss = type_loss + multi_loss + single_loss \n all_pred_relations = self.get_predicted_relations(logits,entities,relations,key_mask,value_mask)\n\n return loss,all_pred_relations\n\n\n def pooling(self, table_logits, key_mask, value_mask):\n key_repr_list = []\n value_repr_list = []\n bs,_,_,_ = 
table_logits.shape\n key_mask = key_mask.to(torch.bool)\n value_mask = value_mask.to(torch.bool)\n M = key_mask.shape[1]\n N = value_mask.shape[1]\n for b in range(bs):\n logit = table_logits[b][key_mask[b]]\n logit = logit[:,value_mask[b]]\n key_num, value_num, _ = logit.shape\n if self.pooling_mode == 'max':\n key_repr = logit.max(dim=1).values \n value_repr = logit.max(dim=0).values \n else:\n key_repr = logit.mean(dim=1)\n value_repr = logit.mean(dim=0)\n key_repr_list.append(F.pad(key_repr,(0, 0, 0, M - key_num)))\n value_repr_list.append(F.pad(value_repr,(0, 0, 0, N - value_num)))\n key_new = torch.stack(key_repr_list,dim=0) \n value_new = torch.stack(value_repr_list,dim=0)\n return key_new, value_new\n \n def get_type_loss(self,type_logits,key_mask,entities,relations):\n # logits 2,64,65,3\n logits = self.softmax(type_logits[:,:,0])\n B = logits.shape[0]\n device = logits.device\n key_mask = key_mask.bool()\n loss_fcn = CrossEntropyLoss()\n\n for b in range(B):\n logit = logits[b][key_mask[b]]\n\n from IPython import embed;embed()\n relations \n\n def get_loss(self,logits,entities,relations,key_mask,value_mask):\n loss_fcn = CrossEntropyLoss()\n B = logits.shape[0]\n device = logits.device\n loss = 0\n all_logits = []\n all_labels = []\n key_mask = key_mask.to(torch.bool)\n value_mask = value_mask.to(torch.bool)\n for b in range(B):\n logit = logits[b][key_mask[b]]\n logit = logit[:,value_mask[b]]\n M,N,_ = logit.shape \n\n key_index,value_index = self.devide_entities(entities[b])\n key_list = relations[b]['head']\n value_list = relations[b]['tail']\n labels = torch.zeros(M*N,device=device).view(M,N)\n\n true_label = []\n for i in range(len(key_list)):\n try:\n key = key_index.index(key_list[i])\n value = value_index.index(value_list[i])\n labels[key][value] = 1\n except:\n continue\n \n labels = labels.view(-1).to(dtype=torch.long)\n logit = logit.view(M*N,-1).to(dtype=torch.float)\n \n\n all_logits.append(logit)\n all_labels.append(labels)\n\n all_logits = torch.cat(all_logits,0)\n all_labels = torch.cat(all_labels,0)\n\n loss = loss_fcn(all_logits+1e-10,all_labels)\n return loss \n \n def get_predicted_relations(self,logits,entities,relations,key_mask,value_mask):\n #from IPython import embed;embed()\n softmax = nn.Softmax(dim=-1)\n all_pred_relations = []\n \n device = logits.device\n B = logits.shape[0]\n \n\n key_mask = key_mask.to(torch.bool)\n value_mask = value_mask.to(torch.bool)\n for b in range(B):\n pred_relations = []\n logit = logits[b][key_mask[b]]\n logit = logit[:,value_mask[b]]\n \n M,N,_ = logit.shape\n \n key_index,value_index = self.devide_entities(entities[b])\n for index in range(M*N):\n key = index // N\n value = index % N\n pred_label = logit[key][value].argmax(-1)\n\n if pred_label == 0:\n continue\n \n rel = {}\n rel[\"head_id\"] = key_index[key] \n rel[\"head\"] = (entities[b][\"start\"][rel[\"head_id\"]], entities[b][\"end\"][rel[\"head_id\"]])\n rel[\"head_type\"] = entities[b][\"label\"][rel[\"head_id\"]]\n\n rel[\"tail_id\"] = value_index[value]\n rel[\"tail\"] = (entities[b][\"start\"][rel[\"tail_id\"]], entities[b][\"end\"][rel[\"tail_id\"]])\n rel[\"tail_type\"] = entities[b][\"label\"][rel[\"tail_id\"]]\n rel[\"type\"] = 1\n \n pred_relations.append(rel)\n \n all_pred_relations.append(pred_relations)\n return all_pred_relations \n \n def get_loss_1(self,l_logits,t_logits,entities,relations,key_mask,value_mask):\n loss_fcn = CrossEntropyLoss()\n B = l_logits.shape[0]\n device = l_logits.device\n loss = 0\n all_layout_logits = []\n all_text_logits 
= []\n all_labels = []\n key_mask = key_mask.to(torch.bool)\n value_mask = value_mask.to(torch.bool)\n for b in range(B):\n l_logit = l_logits[b][key_mask[b]]\n l_logit = l_logit[:,value_mask[b]]\n t_logit = t_logits[b][key_mask[b]]\n t_logit = t_logit[:,value_mask[b]]\n M,N,_ = l_logit.shape \n\n key_index,value_index = self.devide_entities(entities[b])\n key_list = relations[b]['head']\n value_list = relations[b]['tail']\n labels = torch.zeros(M*N,device=device).view(M,N)\n\n true_label = []\n for i in range(len(key_list)):\n try:\n key = key_index.index(key_list[i])\n value = value_index.index(value_list[i])\n labels[key][value] = 1\n true_label.append((key*N+value))\n except:\n continue\n \n labels = labels.view(-1).to(dtype=torch.long)\n layout_logit = l_logit.view(M*N,-1).to(dtype=torch.float)\n text_logit = t_logit.view(M*N,-1).to(dtype=torch.float)\n\n all_layout_logits.append(layout_logit)\n all_text_logits.append(text_logit)\n all_labels.append(labels)\n\n all_layout_logits = torch.cat(all_layout_logits,0)\n all_text_logits = torch.cat(all_text_logits,0)\n all_labels = torch.cat(all_labels,0)\n\n layout_loss = loss_fcn(all_layout_logits+1e-10,all_labels)\n text_loss = loss_fcn(all_text_logits+1e-10,all_labels)\n\n loss = 2*layout_loss + text_loss \n return loss \n \n def get_predicted_relations_1(self,l_logits,t_logits,entities,relations,key_mask,value_mask):\n #from IPython import embed;embed()\n softmax = nn.Softmax(dim=-1)\n all_pred_relations = []\n \n device = l_logits.device\n B = l_logits.shape[0]\n \n\n key_mask = key_mask.to(torch.bool)\n value_mask = value_mask.to(torch.bool)\n for b in range(B):\n pred_relations = []\n l_logit = l_logits[b][key_mask[b]]\n l_logit = l_logit[:,value_mask[b]]\n t_logit = t_logits[b][key_mask[b]]\n t_logit = t_logit[:,value_mask[b]]\n M,N,_ = l_logit.shape\n \n key_index,value_index = self.devide_entities(entities[b])\n key_list = relations[b]['head']\n value_list = relations[b]['tail']\n labels = torch.zeros(M*N,device=device).view(M,N)\n\n for index in range(M*N):\n key = index // N\n value = index % N\n layout_pred_label = l_logit[key][value].argmax(-1)\n text_pred_label = t_logit[key][value].argmax(-1)\n\n if layout_pred_label * text_pred_label == 0:\n continue\n \n rel = {}\n rel[\"head_id\"] = key_index[key] \n rel[\"head\"] = (entities[b][\"start\"][rel[\"head_id\"]], entities[b][\"end\"][rel[\"head_id\"]])\n rel[\"head_type\"] = entities[b][\"label\"][rel[\"head_id\"]]\n\n rel[\"tail_id\"] = value_index[value]\n rel[\"tail\"] = (entities[b][\"start\"][rel[\"tail_id\"]], entities[b][\"end\"][rel[\"tail_id\"]])\n rel[\"tail_type\"] = entities[b][\"label\"][rel[\"tail_id\"]]\n rel[\"type\"] = 1\n \n pred_relations.append(rel)\n \n all_pred_relations.append(pred_relations)\n return all_pred_relations " }, { "identifier": "GOSE", "path": "GOSEfinetune/modules/decoders/gose.py", "snippet": "class GOSE(nn.Module):\n def __init__(self, args):\n super().__init__()\n #(rounds,num_heads)\n # self.rounds = 4\n self.args = args\n self.rounds = args.rounds+1\n self.norm = False\n if args.backbone_name == 'lilt':\n self.hidden_size = 960\n elif args.backbone_name == 'xlm':\n self.hidden_size = 768\n self.hidden_dropout_prob = 0.5\n #默认only-mean pooling\n self.pooling_mode = args.pooling_mode\n self.use_gam = args.use_gam\n self.loss_fct = CrossEntropyLoss()\n self.use_prefix = args.use_prefix\n #---对global-attention使用稀疏注意力\n self.use_global_mask = args.use_global_mask\n #--------\n self.use_gate = args.use_gate\n 
print(f\"**********************************Backbone: {args.backbone_name}****************************\")\n print(f\"**********************************Use_GAM: {self.use_gam}************************************\")\n print(f\"**********************************Use_Prefix: {self.use_prefix}********************************\")\n print(f\"**********************************Use_Gate: {self.use_gate}************************************\")\n # print(f\"**********************************Use_Global_Mask: {self.use_global_mask}**********************\")\n print(f\"**********************************Pooling_Mode: {self.pooling_mode}****************************\")\n print(f\"**********************************Iterative_Rounds: {self.rounds-1}****************************\")\n print(f\"**************************************************************\")\n print(f\"**********************************No_Iteration: {self.args.no_it}********************************\")\n print(f\"**********************************No_Global: {self.args.no_global}********************************\")\n print(f\"**********************************Window_size: {self.args.window_size}********************************\")\n # self.mode = 'only-mean'\n # self.mode = 'only-max'\n # self.mode = 'attn-max'\n\n\n \n self.dropout = nn.Dropout(self.hidden_dropout_prob)\n self.elu=nn.ELU()\n self.biaffine = BiaffineAttention(self.hidden_size//2 , 2)\n self.ffn = nn.Linear(2, self.hidden_size//2)\n self.ffn_key = nn.Linear(self.hidden_size, self.hidden_size//2)\n self.ffn_value = nn.Linear(self.hidden_size, self.hidden_size//2)\n\n # attention config\n self.dim = self.hidden_size //2\n self.num_heads = 1\n self.num_tokens = 8 # max_len = 8\n self.window_size = args.window_size # 8 # window_size * S = H \n self.qkv_bias = False\n self.drop = 0\n self.attn_drop = 0\n self.drop_path = 0\n self.max_len = args.max_len #64\n self.norm1 = nn.LayerNorm(self.dim)\n self.norm2 = nn.LayerNorm(self.dim)\n self.global_token_num = args.global_token_num\n print(f\"**********************************Global_token: {self.global_token_num}****************************\")\n self.global_token = nn.Parameter(torch.zeros(1, self.global_token_num, self.hidden_size //2))\n self.attn = Attention(self.dim,num_heads=self.num_heads, num_tokens=self.num_tokens, \n window_size=self.window_size,qkv_bias=self.qkv_bias, \n attn_drop=self.attn_drop, proj_drop=self.drop, args=args)\n\n self.cnt = 0\n self.loss_fcn = CrossEntropyLoss()\n self.normal = True\n self.dummy_vec = nn.Parameter(torch.Tensor(1, self.hidden_size//2))\n nn.init.normal_(self.dummy_vec)\n #----gate\n self.gru = GRU(self.hidden_size//2) \n #---layout-prefix-tuning\n self.axis_dis_fn = nn.Linear(1, self.hidden_size//12)\n self.axis_angle_fn = nn.Linear(1, self.hidden_size//12)\n \n def create_global_mask(self):\n global_mask = torch.zeros(self.global_token_num, self.max_len, self.max_len).cuda()\n step = self.num_tokens\n for idx in range(self.global_token_num):\n row_ids = idx // self.num_tokens\n column_ids = idx % self.num_tokens\n row_start = row_ids * step\n column_start = column_ids * step\n global_mask[idx, row_start:row_start+self.num_tokens,:] = 1\n global_mask[idx, :, column_start:column_start+self.num_tokens] = 1\n return global_mask\n \n def get_entities_kv_index_list(self, entities):\n\n M = self.max_len\n entities_label = entities['label']\n\n entities_key_index = [index for index,label in enumerate(entities_label) if label == 1 ]\n entities_value_index = [index for index,label in enumerate(entities_label) if 
label == 2 ] \n key_num, value_num = len(entities_key_index),len(entities_value_index)\n '''\n in re.py\n if len(all_possible_relations) == 0:\n all_possible_relations = set([(0, 1)])\n '''\n if key_num * value_num == 0:\n #print(\"all_possible_relations == 0\")\n entities_key_index = [0]\n entities_value_index = [1]\n if key_num > M :\n entities_key_index = entities_key_index[:M]\n self.normal = False\n if value_num > M :\n entities_value_index = entities_value_index[:M]\n self.normal = False\n\n return entities_key_index, entities_value_index\n\n \n def forward(self, hidden_state, entities,relations, bbox):\n #if self.cnt == 30: set the num + 1 which failed\n # from IPython import embed;embed()\n self.cnt += 1\n B ,_ ,H = hidden_state.shape\n M = self.max_len\n device = hidden_state.device\n\n loss = 0\n all_pred_relations = []\n\n # B len(entities)\n # entities_label = torch.stack([torch.tensor(dict['label']) for dict in entities],dim=0)\n # padding to max_len M 64\n \n key_repr_list = []\n value_repr_list = []\n key_mask_list = []\n value_mask_list = []\n key_bbox_list, value_bbox_list = [], []\n for b in range(B):\n #key_repr ~ N,H -> 64,H/2\n #value_repr ~ M,H -> 64,H/2\n if len(entities[b][\"start\"]) <= 2:\n entities[b] = {\"end\": [1, 1], \"label\": [0, 0], \"start\": [0, 0]}\n \n entities_key_index, entities_value_index = self.get_entities_kv_index_list(entities[b])\n entities_first_token_index = torch.tensor(entities[b]['start'])\n \n entities_key_first_token_index = entities_first_token_index[entities_key_index]\n entities_value_first_token_index = entities_first_token_index[entities_value_index]\n key_repr = hidden_state[b][entities_key_first_token_index,:]\n value_repr = hidden_state[b][entities_value_first_token_index,:]\n \n key_num,value_num = key_repr.shape[0],value_repr.shape[0]\n # padding key_repr key_num,H -> max_len,H\n # generate mask shape like max_len,H\n \n key_mask_list.append(torch.tensor([[1.]] * key_num + [[0.]] * (M - key_num),device=device).repeat(1,H//2))\n value_mask_list.append(torch.tensor([[1.]] * value_num + [[0.]] * (M - value_num),device=device).repeat(1,H//2))\n # padding key_repr key_num,H -> max_len,H\n key_repr_list.append(F.pad(key_repr,(0, 0, 0, M - key_num)))\n value_repr_list.append(F.pad(value_repr,(0, 0, 0, M - value_num)))\n #----得到kv实体的bbox\n key_bbox = bbox[b][entities_key_first_token_index]\n value_bbox = bbox[b][entities_value_first_token_index]\n key_bbox_list.append(F.pad(key_bbox,(0, 0, 0, M - key_num)))\n value_bbox_list.append(F.pad(value_bbox,(0, 0, 0, M - value_num)))\n\n # batch max_len hidden_size\n key_repr = torch.stack(key_repr_list,dim=0) \n key_mask = torch.stack(key_mask_list,dim=0)\n \n value_repr = torch.stack(value_repr_list,dim=0)\n value_mask = torch.stack(value_mask_list,dim=0)\n \n\n #key_mask * value_mask -> table_mask B,M,H * B,M,H -> B M M H\n table_mask = key_mask.unsqueeze(2).repeat(1,1,M,1)\\\n *value_mask.unsqueeze(1).repeat(1,M,1,1)\n #---global_mask\n if self.use_global_mask:\n self.global_mask = self.create_global_mask()\n global_mask = self.global_mask.unsqueeze(0).repeat(B,1,1,1) #shape[bsz,global_token_num,M,M]\n # global_mask = global_mask.view(B, self.global_token_num, -1)\n else:\n global_mask = None\n \n \n key_mask = key_mask[:,:,0].bool()\n value_mask = value_mask[:,:,0].bool()\n key_ffn = self.ffn_key(key_repr)\n value_ffn = self.ffn_value(value_repr)\n \n if self.norm == True:\n key_ffn = self.norm1(key_repr)\n value_ffn = self.norm1(value_repr)\n global_token = self.global_token.expand(B, 
-1, -1)\n key_bbox = torch.stack(key_bbox_list, dim=0) \n value_bbox = torch.stack(value_bbox_list, dim=0) \n layout_repr = self.calc_layout(key_bbox, value_bbox)\n layout_repr = layout_repr * table_mask\n layout_repr = layout_repr.view(B,M*M,H//2)\n for i in range(self.rounds):\n '''\n method 1 with biaffine \n \n table_mask.shape B M M H/2 -> B M M H (M=64)\n table_logits.shape B M M H/2 -> B M M 2\n B M M 2 -> B M M H\n attention input B (64+1)*64 384\n table input 64 * 64 \n window_size 8\n token_num 64/8 * 64/8 = 64\n '''\n #key_ffn = self.ffn_key(key_repr)\n #value_ffn = self.ffn_value(value_repr)\n #key_ffn = self.ffn_key(key_ffn)\n #value_ffn = self.ffn_value(value_ffn)\n \n table_logits = self.biaffine(key_ffn.unsqueeze(2).repeat(1,1,M,1),\n value_ffn.unsqueeze(1).repeat(1,M,1,1))\n if i < self.rounds-1:\n table_logits = self.ffn(table_logits) * table_mask\n \n if self.use_gam:\n table_logits = table_logits.view(B,M*M,H//2)\n \n table_logits = torch.cat((global_token, table_logits), dim=1)\n if self.use_prefix:\n table_logits = self.attn(table_logits, M, M, table_mask, global_mask, layout_prefix=layout_repr, key_num=key_num, value_num=value_num)\n else:\n table_logits = self.attn(table_logits, M, M, table_mask, global_mask, layout_prefix=None)\n global_token_new = table_logits[:,:self.global_token_num,:]\n global_token = global_token + global_token_new\n table_logits = table_logits[:,self.global_token_num:,:]\n table_logits = table_logits.view(B,M,M,H//2)\n table_logits = table_logits * table_mask\n key_new, value_new = self.get_new_repr(table_logits, key_mask, value_mask)\n if self.norm == True:\n key_new = self.norm2(key_new)\n value_new = self.norm2(value_new)\n if self.use_gate:\n key_ffn = self.gru(key_ffn,key_new)\n value_ffn = self.gru(value_ffn,value_new)\n \n elif self.args.no_it:\n key_ffn = key_new\n value_ffn = value_new\n elif self.args.use_add:\n key_ffn = key_ffn + key_new\n value_ffn = value_ffn + value_new \n else:\n table_logits = table_logits * table_mask[:,:,:,:2]\n\n # table_logits M N 2\n # table_logits.unsqueeze(0)\n # batch_table_logits = table_logits if batch_table_logits == None else torch.cat((batch_table_logits,table_logits),dim=0)\n\n loss = self.get_loss(table_logits,entities,relations,key_mask,value_mask)\n all_pred_relations = self.get_predicted_relations(table_logits,entities,key_mask,value_mask, bbox)\n return loss,all_pred_relations\n \n def calc_layout(self, head_bbox, tail_bbox):\n bsz, num, _ = head_bbox.shape\n head_bbox = head_bbox.unsqueeze(2).repeat(1,1,num,1)\n tail_bbox = tail_bbox.unsqueeze(1).repeat(1,num,1,1)\n \n #-----中心点坐标特征\n head_bbox_center = torch.div(torch.cat(((head_bbox[:,:,:,0]+head_bbox[:,:,:,2]).view(-1,1), (head_bbox[:,:,:,1]+head_bbox[:,:,:,3]).view(-1,1)),dim=1), 2)\n tail_bbox_center = torch.div(torch.cat(((tail_bbox[:,:,:,0]+tail_bbox[:,:,:,2]).view(-1,1), (tail_bbox[:,:,:,1]+tail_bbox[:,:,:,3]).view(-1,1)),dim=1), 2)\n head_tail_center_dis, hea_tail_center_angle = self.axis_features(head_bbox_center, tail_bbox_center)\n head_tail_center_dis_feature = self.axis_dis_fn(head_tail_center_dis)\n head_tail_center_angle_feature = self.axis_angle_fn(hea_tail_center_angle)\n #-----左上点坐标特征\n head_bbox_left_top = torch.cat((head_bbox[:,:,:, 0].view(-1,1), head_bbox[:,:,:, 1].view(-1,1)), dim=1)\n tail_bbox_left_top = torch.cat((tail_bbox[:,:,:, 0].view(-1,1), tail_bbox[:,:,:, 1].view(-1,1)), dim=1)\n head_tail_lt_dis, hea_tail_lt_angle = self.axis_features(head_bbox_left_top, tail_bbox_left_top)\n head_tail_lt_dis_feature = 
self.axis_dis_fn(head_tail_lt_dis)\n hea_tail_lt_angle_feature = self.axis_angle_fn(hea_tail_lt_angle)\n #-----右下点坐标特征\n head_bbox_right_down = torch.cat((head_bbox[:,:,:, 2].view(-1,1), head_bbox[:,:,:, 3].view(-1,1)), dim=1)\n tail_bbox_right_down = torch.cat((tail_bbox[:,:,:, 2].view(-1,1), tail_bbox[:,:,:, 3].view(-1,1)), dim=1)\n head_tail_rd_dis, hea_tail_rd_angle = self.axis_features(head_bbox_right_down, tail_bbox_right_down)\n head_tail_rd_dis_feature = self.axis_dis_fn(head_tail_rd_dis)\n hea_tail_rd_angle_feature = self.axis_angle_fn(hea_tail_rd_angle)\n layout_repr = torch.cat(\n (head_tail_center_dis_feature, head_tail_center_angle_feature\n , head_tail_lt_dis_feature, hea_tail_lt_angle_feature\n , head_tail_rd_dis_feature, hea_tail_rd_angle_feature\n ),\n dim=-1\n )\n layout_repr = layout_repr.view(bsz, num, num, -1) \n return layout_repr\n \n \n \n def axis_features(self, tmp_bbox_1, tmp_bbox_2):\n tmp_bbox_distance = torch.pow(torch.sum(torch.pow(tmp_bbox_1 - tmp_bbox_2, 2), dim=1), 0.5) #欧氏距离\n tmp_bbox_distance = tmp_bbox_distance.view(-1, 1)\n ##########计算角度\n head_tail_x = tmp_bbox_1[:, 0] - tmp_bbox_2[:, 0]\n head_tail_y = tmp_bbox_1[:, 1] - tmp_bbox_2[:, 1]\n tmp_bbox_angle = torch.div(torch.atan2(head_tail_y, head_tail_x), 3.1416) #正切的角度\n tmp_bbox_angle = tmp_bbox_angle.view(-1, 1)\n return torch.div(tmp_bbox_distance, 1000), tmp_bbox_angle\n\n \n \n \n def get_new_repr(self, table_logits, key_mask, value_mask):\n key_repr_list = []\n value_repr_list = []\n bs,_,_,_ = table_logits.shape\n for b in range(bs):\n logit = table_logits[b][key_mask[b]]\n logit = logit[:,value_mask[b]]\n key_num, value_num, _ = logit.shape\n if self.pooling_mode == 'max':\n key_repr = logit.max(dim=1).values \n value_repr = logit.max(dim=0).values \n else:\n key_repr = logit.mean(dim=1)\n value_repr = logit.mean(dim=0)\n key_repr_list.append(F.pad(key_repr,(0, 0, 0, self.max_len - key_num)))\n value_repr_list.append(F.pad(value_repr,(0, 0, 0, self.max_len - value_num)))\n key_new = torch.stack(key_repr_list,dim=0) \n value_new = torch.stack(value_repr_list,dim=0)\n return key_new, value_new\n \n def get_predicted_relations(self, logists,entities,key_mask,value_mask,bbox):\n all_pred_relations = []\n #logits.shape B,M,N,2\n #here is one batch so no dim B\n B,N,M,_=logists.shape\n for b in range(B):\n\n pred_relations = []\n logist = logists[b][key_mask[b]]\n logist = logist[:,value_mask[b]]\n N,M,_ = logist.shape\n \n #---index指的是序列中的第几个实体\n entities_key_index, entities_value_index = self.get_entities_kv_index_list(entities[b])\n \n # if len(entities_key_index) > 64 or len(entities_value_index) > 64:\n # from IPython import embed;embed();exit()\n \n for index in range(M*N):\n key = index // M\n value = index % M\n pred_label = logist[key][value].argmax(-1)\n\n if pred_label == 0:\n continue\n \n rel = {}\n rel[\"head_id\"] = entities_key_index[key]\n rel[\"head\"] = (entities[b][\"start\"][rel[\"head_id\"]], entities[b][\"end\"][rel[\"head_id\"]])\n rel[\"head_type\"] = entities[b][\"label\"][rel[\"head_id\"]]\n\n rel[\"tail_id\"] = entities_value_index[value]\n rel[\"tail\"] = (entities[b][\"start\"][rel[\"tail_id\"]], entities[b][\"end\"][rel[\"tail_id\"]])\n rel[\"tail_type\"] = entities[b][\"label\"][rel[\"tail_id\"]]\n rel[\"type\"] = 1\n key_bbox_left_top = bbox[b][entities[b][\"start\"][rel[\"head_id\"]]].tolist()[:2]\n value_bbox_left_top = bbox[b][entities[b][\"start\"][rel[\"tail_id\"]]].tolist()[:2]\n rel[\"link\"] = (tuple(key_bbox_left_top), tuple(value_bbox_left_top))\n 
#--------\n pred_relations.append(rel)\n all_pred_relations.append(pred_relations)\n \n return all_pred_relations\n \n \n def get_loss(self,logists,entities,relations,key_mask,value_mask):\n #mask B M M H\n device = logists.device\n loss = 0\n B = key_mask.shape[0]\n all_logits = []\n all_labels = []\n for b in range(B):\n # 64,64 -> N,M\n logist = logists[b][key_mask[b]]\n logist = logist[:,value_mask[b]]\n N,M,_ = logist.shape\n\n\n entities_key_index, entities_value_index = self.get_entities_kv_index_list(entities[b])\n \n entities_key_list = relations[b]['head']\n entities_value_list = relations[b]['tail']\n\n labels = torch.zeros(N*M).to(device).view(N,M)\n \n for i in range(len(entities_key_list)):\n try:\n key = entities_key_index.index(entities_key_list[i])\n value = entities_value_index.index(entities_value_list[i])\n labels[key][value] = 1\n except:\n continue\n \n \n labels = labels.view(-1).to(dtype=torch.long)\n logist = logist.view(N*M,-1).to(dtype=torch.float)\n all_logits.append(logist)\n all_labels.append(labels)\n all_logits = torch.cat(all_logits, 0)\n all_labels = torch.cat(all_labels, 0)\n loss = self.loss_fcn(all_logits+1e-10, all_labels)\n if (torch.isnan(loss).sum().item() > 0):\n loss = torch.where(torch.isnan(loss), torch.zeros_like(loss), loss)\n \n return loss" }, { "identifier": "ReOutput", "path": "GOSEfinetune/utils.py", "snippet": "class ReOutput(ModelOutput):\n loss: Optional[torch.FloatTensor] = None\n logits: torch.FloatTensor = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None\n entities: Optional[Dict] = None\n relations: Optional[Dict] = None\n pred_relations: Optional[Dict] = None" } ]
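Both decoder snippets quoted in the context above (RE and GOSE) score every key/value entity pair with a BiaffineAttention module whose definition is not part of the excerpt. As a rough sketch only, assuming the common formulation of a bilinear term plus a linear term over the concatenated pair (the repository's actual implementation may differ):

import torch
import torch.nn as nn

class BiaffineAttention(nn.Module):
    """Sketch of a biaffine pair scorer: bilinear(head, tail) + linear([head; tail])."""

    def __init__(self, in_features: int, out_features: int):
        super().__init__()
        self.bilinear = nn.Bilinear(in_features, in_features, out_features, bias=False)
        self.linear = nn.Linear(2 * in_features, out_features, bias=True)

    def forward(self, x_1: torch.Tensor, x_2: torch.Tensor) -> torch.Tensor:
        # x_1, x_2: (..., in_features), broadcast over the key/value grid
        return self.bilinear(x_1, x_2) + self.linear(torch.cat((x_1, x_2), dim=-1))

# Usage mirroring the snippets: a 64 x 64 key/value table with 2 relation classes
# (480 = hidden_size // 2 for the 960-dim LiLT backbone mentioned above).
scorer = BiaffineAttention(in_features=480, out_features=2)
key = torch.randn(1, 64, 480)
value = torch.randn(1, 64, 480)
table_logits = scorer(key.unsqueeze(2).repeat(1, 1, 64, 1),
                      value.unsqueeze(1).repeat(1, 64, 1, 1))  # shape (1, 64, 64, 2)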
import_statement:
import math
import torch
import torch.nn as nn
import torch.utils.checkpoint
import os
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from transformers.activations import ACT2FN, gelu
from transformers.file_utils import (
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    replace_return_docstrings,
)
from transformers.modeling_outputs import (
    BaseModelOutputWithPastAndCrossAttentions,
    BaseModelOutputWithPoolingAndCrossAttentions,
    CausalLMOutputWithCrossAttentions,
    MaskedLMOutput,
    MultipleChoiceModelOutput,
    QuestionAnsweringModelOutput,
    SequenceClassifierOutput,
    TokenClassifierOutput,
)
from transformers.modeling_utils import (
    PreTrainedModel,
    apply_chunking_to_forward,
    find_pruneable_heads_and_indices,
    prune_linear_layer,
)
from transformers.utils import logging
from .configuration_LiLTRobertaLike import LiLTRobertaLikeConfig
from dataclasses import dataclass
from typing import Dict, Optional, Tuple
from transformers.file_utils import ModelOutput
from ...modules.decoders.RE import RE
from ...modules.decoders.gose import GOSE
from ...utils import ReOutput
token_num: 12,301
output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ), layout_encoder_outputs class LiLTRobertaLikeForTokenClassification(LiLTRobertaLikePreTrainedModel): _keys_to_ignore_on_load_unexpected = [r"pooler"] _keys_to_ignore_on_load_missing = [r"position_ids"] def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.lilt = LiLTRobertaLikeModel(config, add_pooling_layer=False) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size + config.hidden_size//config.channel_shrink_ratio, config.num_labels) self.init_weights() def forward( self, input_ids=None, bbox=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels - 1]``. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs, layout_outputs = self.lilt( input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] sequence_output = torch.cat([sequence_output, layout_outputs], -1) sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() # Only keep active parts of the loss if attention_mask is not None: active_loss = attention_mask.view(-1) == 1 active_logits = logits.view(-1, self.num_labels) active_labels = torch.where( active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels) ) loss = loss_fct(active_logits, active_labels) else: loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class LiLTRobertaLikeForRelationExtraction(LiLTRobertaLikePreTrainedModel): _keys_to_ignore_on_load_unexpected = [r"pooler"] _keys_to_ignore_on_load_missing = [r"position_ids"] def __init__(self, config): super().__init__(config) self.lilt = LiLTRobertaLikeModel(config, add_pooling_layer=False) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.input_type = config.input_type self.freeze_model = config.freeze_model print(f'=============model freeze {self.freeze_model}===============') #from IPython import embed;embed() self.decoder = config.decoder_name if self.decoder == 're': self.extractor = REDecoder(config, config.hidden_size + config.hidden_size // config.channel_shrink_ratio) elif self.decoder == 
'gose':
# coding=utf-8 logger = logging.get_logger(__name__) class LiLTRobertaLikeTextEmbeddings(nn.Module): def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") # End copy self.padding_idx = config.pad_token_id self.position_embeddings = nn.Embedding( config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx ) def forward( self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0 ): if position_ids is None: if input_ids is not None: # Create the position ids from the input token ids. Any padded tokens remain padded. position_ids = create_position_ids_from_input_ids( input_ids, self.padding_idx, past_key_values_length ).to(input_ids.device) else: position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds) if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == "absolute": position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings, position_ids def create_position_ids_from_inputs_embeds(self, inputs_embeds): """ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids. 
Args: inputs_embeds: torch.Tensor Returns: torch.Tensor """ input_shape = inputs_embeds.size()[:-1] sequence_length = input_shape[1] position_ids = torch.arange( self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device ) return position_ids.unsqueeze(0).expand(input_shape) class LiLTRobertaLikeLayoutEmbeddings(nn.Module): def __init__(self, config): super(LiLTRobertaLikeLayoutEmbeddings, self).__init__() self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size // 6) self.y_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size // 6) self.h_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size // 6) self.w_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size // 6) self.padding_idx = config.pad_token_id self.box_position_embeddings = nn.Embedding( config.max_position_embeddings, config.hidden_size//config.channel_shrink_ratio, padding_idx=self.padding_idx ) self.box_linear_embeddings = nn.Linear(in_features=config.hidden_size, out_features=config.hidden_size//config.channel_shrink_ratio) self.LayerNorm = nn.LayerNorm(config.hidden_size//config.channel_shrink_ratio, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward( self, bbox=None, position_ids=None, ): try: left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0]) upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1]) right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2]) lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3]) except IndexError as e: raise IndexError("The :obj:`bbox`coordinate values should be within 0-1000 range.") from e h_position_embeddings = self.h_position_embeddings(bbox[:, :, 3] - bbox[:, :, 1]) w_position_embeddings = self.w_position_embeddings(bbox[:, :, 2] - bbox[:, :, 0]) spatial_position_embeddings = torch.cat( [ left_position_embeddings, upper_position_embeddings, right_position_embeddings, lower_position_embeddings, h_position_embeddings, w_position_embeddings, ], dim=-1, ) spatial_position_embeddings = self.box_linear_embeddings(spatial_position_embeddings) box_position_embeddings = self.box_position_embeddings(position_ids) spatial_position_embeddings = spatial_position_embeddings + box_position_embeddings spatial_position_embeddings = self.LayerNorm(spatial_position_embeddings) spatial_position_embeddings = self.dropout(spatial_position_embeddings) return spatial_position_embeddings class LiLTRobertaLikeSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.layout_query = nn.Linear(config.hidden_size // config.channel_shrink_ratio, self.all_head_size // config.channel_shrink_ratio) self.layout_key = nn.Linear(config.hidden_size // config.channel_shrink_ratio, 
self.all_head_size // config.channel_shrink_ratio) self.layout_value = nn.Linear(config.hidden_size // config.channel_shrink_ratio, self.all_head_size // config.channel_shrink_ratio) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.is_decoder = config.is_decoder self.channel_shrink_ratio = config.channel_shrink_ratio def transpose_for_scores(self, x, r=1): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size//r) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states, layout_inputs, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): layout_value_layer = self.transpose_for_scores(self.layout_value(layout_inputs), r=self.channel_shrink_ratio) layout_key_layer = self.transpose_for_scores(self.layout_key(layout_inputs), r=self.channel_shrink_ratio) layout_query_layer = self.transpose_for_scores(self.layout_query(layout_inputs), r=self.channel_shrink_ratio) mixed_query_layer = self.query(hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. 
Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_layer, value_layer) attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) layout_attention_scores = torch.matmul(layout_query_layer, layout_key_layer.transpose(-1, -2)) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": seq_length = hidden_states.size()[1] position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility if self.position_embedding_type == "relative_key": relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == "relative_key_query": relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key tmp_attention_scores = attention_scores / math.sqrt(self.attention_head_size) tmp_layout_attention_scores = layout_attention_scores / math.sqrt(self.attention_head_size//self.channel_shrink_ratio) attention_scores = tmp_attention_scores + tmp_layout_attention_scores layout_attention_scores = tmp_layout_attention_scores + tmp_attention_scores if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in BertModel forward() function) layout_attention_scores = layout_attention_scores + attention_mask # Normalize the attention scores to probabilities. layout_attention_probs = nn.Softmax(dim=-1)(layout_attention_scores) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. layout_attention_probs = self.dropout(layout_attention_probs) # Mask heads if we want to if head_mask is not None: layout_attention_probs = layout_attention_probs * head_mask layout_context_layer = torch.matmul(layout_attention_probs, layout_value_layer) layout_context_layer = layout_context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = layout_context_layer.size()[:-2] + (self.all_head_size//self.channel_shrink_ratio,) layout_context_layer = layout_context_layer.view(*new_context_layer_shape) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in RobertaModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.Softmax(dim=-1)(attention_scores) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = ((context_layer, layout_context_layer), attention_probs) if output_attentions else ((context_layer, layout_context_layer),) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs class LiLTRobertaLikeSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class LiLTRobertaLikeAttention(nn.Module): def __init__(self, config): super().__init__() self.self = LiLTRobertaLikeSelfAttention(config) self.output = LiLTRobertaLikeSelfOutput(config) self.pruned_heads = set() ori_hidden_size = config.hidden_size config.hidden_size = config.hidden_size // config.channel_shrink_ratio self.layout_output = LiLTRobertaLikeSelfOutput(config) config.hidden_size = ori_hidden_size def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states, layout_inputs, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): self_outputs = self.self( hidden_states, layout_inputs, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) attention_output = self.output(self_outputs[0][0], hidden_states) layout_attention_output = self.layout_output(self_outputs[0][1], layout_inputs) outputs = ((attention_output, layout_attention_output),) + self_outputs[1:] # add attentions if we output them return outputs class LiLTRobertaLikeIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class LiLTRobertaLikeOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, 
eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class LiLTRobertaLikeLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = LiLTRobertaLikeAttention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: assert self.is_decoder, f"{self} should be used as a decoder model if cross attention is added" self.crossattention = LiLTRobertaLikeAttention(config) self.intermediate = LiLTRobertaLikeIntermediate(config) self.output = LiLTRobertaLikeOutput(config) ori_hidden_size = config.hidden_size ori_intermediate_size = config.intermediate_size config.hidden_size = config.hidden_size // config.channel_shrink_ratio config.intermediate_size = config.intermediate_size // config.channel_shrink_ratio self.layout_intermediate = LiLTRobertaLikeIntermediate(config) self.layout_output = LiLTRobertaLikeOutput(config) config.hidden_size = ori_hidden_size config.intermediate_size = ori_intermediate_size def forward( self, hidden_states, layout_inputs, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( hidden_states, layout_inputs, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value, ) attention_output = self_attention_outputs[0][0] layout_attention_output = self_attention_outputs[0][1] # if decoder, the last output is tuple of self-attn cache if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] # add self attentions if we output attention weights cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: assert hasattr( self, "crossattention" ), f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`" # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention( attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions, ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights # add cross-attn cache to positions 3,4 of present_key_value tuple cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) layout_layer_output = apply_chunking_to_forward( self.layout_feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, layout_attention_output ) outputs = ((layer_output, layout_layer_output),) + 
outputs # if decoder, return the attn key/values as the last output if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output def layout_feed_forward_chunk(self, attention_output): intermediate_output = self.layout_intermediate(attention_output) layer_output = self.layout_output(intermediate_output, attention_output) return layer_output class LiLTRobertaLikeEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([LiLTRobertaLikeLayer(config) for _ in range(config.num_hidden_layers)]) def forward( self, hidden_states, layout_inputs, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=False, output_hidden_states=False, return_dict=True, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None next_decoder_cache = () if use_cache else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if getattr(self.config, "gradient_checkpointing", False) and self.training: if use_cache: logger.warning( "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting " "`use_cache=False`..." ) use_cache = False def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, past_key_value, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(layer_module), hidden_states, layout_inputs, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, ) else: layer_outputs = layer_module( hidden_states, layout_inputs, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) hidden_states = layer_outputs[0][0] layout_inputs = layer_outputs[0][1] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ), layout_inputs return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ), layout_inputs class LiLTRobertaLikePooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. 
first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class LiLTRobertaLikePreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = LiLTRobertaLikeConfig base_model_prefix = "liltrobertalike" def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) class LiLTRobertaLikeModel(LiLTRobertaLikePreTrainedModel): _keys_to_ignore_on_load_missing = [r"position_ids"] def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.embeddings = LiLTRobertaLikeTextEmbeddings(config) self.layout_embeddings = LiLTRobertaLikeLayoutEmbeddings(config) self.encoder = LiLTRobertaLikeEncoder(config) self.pooler = LiLTRobertaLikePooler(config) if add_pooling_layer else None self.init_weights() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) def forward( self, input_ids=None, bbox=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.config.is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() batch_size, seq_length = input_shape elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] batch_size, seq_length = input_shape else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, 
device=device) if bbox is None: bbox = torch.zeros(tuple(list(input_shape) + [4]), dtype=torch.long, device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output, position_ids = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, ) layout_embedding_output = self.layout_embeddings( bbox=bbox, position_ids=position_ids, ) encoder_outputs, layout_encoder_outputs = self.encoder( embedding_output, layout_embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ), layout_encoder_outputs class LiLTRobertaLikeForTokenClassification(LiLTRobertaLikePreTrainedModel): _keys_to_ignore_on_load_unexpected = [r"pooler"] _keys_to_ignore_on_load_missing = [r"position_ids"] def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.lilt = LiLTRobertaLikeModel(config, add_pooling_layer=False) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size + config.hidden_size//config.channel_shrink_ratio, config.num_labels) self.init_weights() def forward( self, input_ids=None, bbox=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the token classification loss. 
Indices should be in ``[0, ..., config.num_labels - 1]``. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs, layout_outputs = self.lilt( input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] sequence_output = torch.cat([sequence_output, layout_outputs], -1) sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() # Only keep active parts of the loss if attention_mask is not None: active_loss = attention_mask.view(-1) == 1 active_logits = logits.view(-1, self.num_labels) active_labels = torch.where( active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels) ) loss = loss_fct(active_logits, active_labels) else: loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class LiLTRobertaLikeForRelationExtraction(LiLTRobertaLikePreTrainedModel): _keys_to_ignore_on_load_unexpected = [r"pooler"] _keys_to_ignore_on_load_missing = [r"position_ids"] def __init__(self, config): super().__init__(config) self.lilt = LiLTRobertaLikeModel(config, add_pooling_layer=False) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.input_type = config.input_type self.freeze_model = config.freeze_model print(f'=============model freeze {self.freeze_model}===============') #from IPython import embed;embed() self.decoder = config.decoder_name if self.decoder == 're': self.extractor = REDecoder(config, config.hidden_size + config.hidden_size // config.channel_shrink_ratio) elif self.decoder == 'gose':
self.extractor = GOSE(config)
2
2023-10-19 14:36:32+00:00
16k
BurgerBurgerBurger/AA
run.py
[ { "identifier": "add_args", "path": "args.py", "snippet": "def add_args(parser):\n parser.add_argument(\"--do_train\", action=\"store_true\")\n parser.add_argument(\"--data_dir\", default=\"./dataset/docred\", type=str)\n parser.add_argument(\"--transformer_type\", default=\"bert\", type=str)\n parser.add_argument(\"--model_name_or_path\", default=\"bert-base-cased\", type=str)\n\n parser.add_argument(\"--train_file\", default=\"train_annotated.json\", type=str)\n parser.add_argument(\"--dev_file\", default=\"dev.json\", type=str)\n parser.add_argument(\"--test_file\", default=\"dev.json\", type=str)\n parser.add_argument(\"--pred_file\", default=\"results.json\", type=str)\n parser.add_argument(\"--save_path\", default=\"\", type=str)\n parser.add_argument(\"--load_path\", default=\"\", type=str)\n parser.add_argument(\"--results_path\", default=\"\", type=str)\n parser.add_argument(\"--teacher_sig_path\", default=\"\", type=str)\n parser.add_argument(\"--save_attn\", action=\"store_true\", help=\"Whether store the evidence distribution or not\")\n\n # graph\n parser.add_argument(\"--attn_heads\", default=2, type=int, help=\"Attention heads\")\n parser.add_argument(\"--gcn_layers\", default=2, type=int, help=\"GCN layers\")\n parser.add_argument(\"--iters\", default=2, type=int, help=\"Iteration\")\n parser.add_argument(\"--use_graph\", action=\"store_true\", help=\"Use graph\")\n\n parser.add_argument(\"--config_name\", default=\"\", type=str,\n help=\"Pretrained config name or path if not the same as model_name\")\n parser.add_argument(\"--tokenizer_name\", default=\"\", type=str,\n help=\"Pretrained tokenizer name or path if not the same as model_name\")\n parser.add_argument(\"--max_seq_length\", default=1024, type=int,\n help=\"The maximum total input sequence length after tokenization. Sequences longer \"\n \"than this will be truncated, sequences shorter will be padded.\")\n\n parser.add_argument(\"--train_batch_size\", default=4, type=int,\n help=\"Batch size for training.\")\n parser.add_argument(\"--test_batch_size\", default=8, type=int,\n help=\"Batch size for testing.\")\n parser.add_argument(\"--eval_mode\", default=\"single\", type=str,\n choices=[\"single\", \"fushion\"], \n help=\"Single-pass evaluation or evaluation with inference-stage fusion.\")\n parser.add_argument(\"--gradient_accumulation_steps\", default=1, type=int,\n help=\"Number of updates steps to accumulate before performing a backward/update pass.\")\n parser.add_argument(\"--num_labels\", default=4, type=int,\n help=\"Max number of labels in prediction.\")\n parser.add_argument(\"--max_sent_num\", default=25, type=int,\n help=\"Max number of sentences in each document.\")\n parser.add_argument(\"--evi_thresh\", default=0.2, type=float,\n help=\"Evidence Threshold. \")\n parser.add_argument(\"--evi_lambda\", default=0.1, type=float,\n help=\"Weight of relation-agnostic evidence loss during training. \")\n parser.add_argument(\"--attn_lambda\", default=1.0, type=float,\n help=\"Weight of knowledge distillation loss for attentions during training. 
\")\n parser.add_argument(\"--lr_transformer\", default=5e-5, type=float,\n help=\"The initial learning rate for transformer.\")\n parser.add_argument(\"--lr_added\", default=1e-4, type=float,\n help=\"The initial learning rate for added modules.\")\n parser.add_argument(\"--adam_epsilon\", default=1e-6, type=float,\n help=\"Epsilon for Adam optimizer.\")\n parser.add_argument(\"--max_grad_norm\", default=1.0, type=float,\n help=\"Max gradient norm.\")\n parser.add_argument(\"--warmup_ratio\", default=0.06, type=float,\n help=\"Warm up ratio for Adam.\")\n parser.add_argument(\"--num_train_epochs\", default=30.0, type=float,\n help=\"Total number of training epochs to perform.\")\n parser.add_argument(\"--evaluation_steps\", default=-1, type=int,\n help=\"Number of training steps between evaluations.\")\n parser.add_argument(\"--seed\", type=int, default=66,\n help=\"random seed for initialization\")\n parser.add_argument(\"--num_class\", type=int, default=97,\n help=\"Number of relation types in dataset.\")\n\n return parser" }, { "identifier": "DocREModel", "path": "model.py", "snippet": "class DocREModel(nn.Module):\n\n def __init__(self, args, config, model, tokenizer,\n emb_size=768, block_size=64, num_labels=-1,\n max_sent_num=25, evi_thresh=0.2):\n super().__init__()\n self.config = config\n self.model = model\n self.tokenizer = tokenizer\n self.hidden_size = config.hidden_size\n\n self.loss_fnt = ATLoss()\n self.loss_fnt_evi = nn.KLDivLoss(reduction=\"batchmean\")\n\n self.head_extractor = nn.Linear(self.hidden_size * 2, emb_size)\n self.tail_extractor = nn.Linear(self.hidden_size * 2, emb_size)\n\n self.use_graph = args.use_graph\n if self.use_graph:\n self.head_extractor = nn.Linear(3 * config.hidden_size, emb_size)\n self.tail_extractor = nn.Linear(3 * config.hidden_size, emb_size)\n self.bilinear = nn.Linear(emb_size * block_size, config.num_labels)\n\n self.emb_size = emb_size\n self.block_size = block_size\n self.num_labels = num_labels\n self.total_labels = config.num_labels\n self.max_sent_num = max_sent_num\n self.evi_thresh = evi_thresh\n\n self.edges = ['self-loop', 'mention-anaphor', 'co-reference', 'inter-entity']\n\n if self.use_graph:\n self.graph_layers = nn.ModuleList(\n AttentionGCNLayer(self.edges, self.hidden_size, nhead=args.attn_heads, iters=args.gcn_layers) for _ in\n range(args.iters))\n\n def encode(self, input_ids, attention_mask):\n config = self.config\n if config.transformer_type == \"bert\":\n start_tokens = [config.cls_token_id]\n end_tokens = [config.sep_token_id]\n elif config.transformer_type == \"roberta\":\n start_tokens = [config.cls_token_id]\n end_tokens = [config.sep_token_id, config.sep_token_id]\n # process long documents.\n sequence_output, attention = process_long_input(self.model, input_ids, attention_mask, start_tokens, end_tokens)\n\n return sequence_output, attention\n\n def get_hrt(self, sequence_output, attention, entity_pos, hts, offset):\n n, h, _, c = attention.size()\n hss, tss, rss = [], [], []\n ht_atts = []\n\n for i in range(len(entity_pos)): # for each batch\n entity_embs, entity_atts = [], []\n\n # obtain entity embedding from mention embeddings.\n for eid, e in enumerate(entity_pos[i]): # for each entity\n if len(e) > 1:\n e_emb, e_att = [], []\n for mid, (start, end) in enumerate(e): # for every mention\n if start + offset < c:\n # In case the entity mention is truncated due to limited max seq length.\n e_emb.append(sequence_output[i, start + offset])\n e_att.append(attention[i, :, start + offset])\n\n if len(e_emb) > 
0:\n e_emb = torch.logsumexp(torch.stack(e_emb, dim=0), dim=0)\n e_att = torch.stack(e_att, dim=0).mean(0)\n else:\n e_emb = torch.zeros(self.config.hidden_size).to(sequence_output)\n e_att = torch.zeros(h, c).to(attention)\n else:\n start, end = e[0]\n if start + offset < c:\n e_emb = sequence_output[i, start + offset]\n e_att = attention[i, :, start + offset]\n else:\n e_emb = torch.zeros(self.config.hidden_size).to(sequence_output)\n e_att = torch.zeros(h, c).to(attention)\n\n entity_embs.append(e_emb)\n entity_atts.append(e_att)\n\n entity_embs = torch.stack(entity_embs, dim=0) # [n_e, d]\n entity_atts = torch.stack(entity_atts, dim=0) # [n_e, h, seq_len]\n\n ht_i = torch.LongTensor(hts[i]).to(sequence_output.device)\n\n # obtain subject/object (head/tail) embeddings from entity embeddings.\n hs = torch.index_select(entity_embs, 0, ht_i[:, 0])\n ts = torch.index_select(entity_embs, 0, ht_i[:, 1])\n\n h_att = torch.index_select(entity_atts, 0, ht_i[:, 0])\n t_att = torch.index_select(entity_atts, 0, ht_i[:, 1])\n\n ht_att = (h_att * t_att).mean(1) # average over all heads\n ht_att = ht_att / (ht_att.sum(1, keepdim=True) + 1e-30)\n ht_atts.append(ht_att)\n\n # obtain local context embeddings.\n rs = contract(\"ld,rl->rd\", sequence_output[i], ht_att)\n\n hss.append(hs)\n tss.append(ts)\n rss.append(rs)\n\n rels_per_batch = [len(b) for b in hss]\n hss = torch.cat(hss, dim=0) # (num_ent_pairs_all_batches, emb_size)\n tss = torch.cat(tss, dim=0) # (num_ent_pairs_all_batches, emb_size)\n rss = torch.cat(rss, dim=0) # (num_ent_pairs_all_batches, emb_size)\n ht_atts = torch.cat(ht_atts, dim=0) # (num_ent_pairs_all_batches, max_doc_len)\n\n return hss, rss, tss, ht_atts, rels_per_batch\n\n def graph(self, sequence_output, graphs, attention, entity_pos, hts, offset):\n n, h, _, c = attention.size()\n\n max_node = max([graph.shape[0] for graph in graphs])\n graph_fea = torch.zeros(n, max_node, self.config.hidden_size, device=sequence_output.device)\n graph_adj = torch.zeros(n, max_node, max_node, device=sequence_output.device)\n\n for i, graph in enumerate(graphs):\n nodes_num = graph.shape[0]\n graph_adj[i, :nodes_num, :nodes_num] = torch.from_numpy(graph)\n\n for i in range(len(entity_pos)):\n mention_index = 0\n for e in entity_pos[i]:\n for start, end in e:\n if start + offset < c:\n # In case the entity mention is truncated due to limited max seq length.\n graph_fea[i, mention_index, :] = sequence_output[i, start + offset]\n else:\n graph_fea[i, mention_index, :] = torch.zeros(self.config.hidden_size).to(sequence_output)\n mention_index += 1\n\n for graph_layer in self.graph_layers:\n graph_fea, _ = graph_layer(graph_fea, graph_adj)\n\n h_entity, t_entity = [], []\n for i in range(len(entity_pos)):\n entity_embs = []\n mention_index = 0\n for e in entity_pos[i]:\n e_emb = graph_fea[i, mention_index:mention_index + len(e), :]\n mention_index += len(e)\n\n e_emb = torch.logsumexp(e_emb, dim=0) if len(e) > 1 else e_emb.squeeze(0)\n entity_embs.append(e_emb)\n\n entity_embs = torch.stack(entity_embs, dim=0)\n ht_i = torch.LongTensor(hts[i]).to(sequence_output.device)\n hs = torch.index_select(entity_embs, 0, ht_i[:, 0])\n ts = torch.index_select(entity_embs, 0, ht_i[:, 1])\n h_entity.append(hs)\n t_entity.append(ts)\n\n h_entity = torch.cat(h_entity, dim=0)\n t_entity = torch.cat(t_entity, dim=0)\n return h_entity, t_entity\n\n def forward_rel(self, hs, ts, rs, h, t):\n hs = torch.tanh(self.head_extractor(torch.cat([hs, rs, h], dim=-1)))\n ts = torch.tanh(self.tail_extractor(torch.cat([ts, rs, 
t], dim=-1)))\n # split into several groups.\n b1 = hs.view(-1, self.emb_size // self.block_size, self.block_size)\n b2 = ts.view(-1, self.emb_size // self.block_size, self.block_size)\n\n bl = (b1.unsqueeze(3) * b2.unsqueeze(2)).view(-1, self.emb_size * self.block_size)\n logits = self.bilinear(bl)\n\n return logits\n\n def forward_rel_no_graph(self, hs, ts, rs):\n hs = torch.tanh(self.head_extractor(torch.cat([hs, rs], dim=-1)))\n ts = torch.tanh(self.tail_extractor(torch.cat([ts, rs], dim=-1)))\n # split into several groups.\n b1 = hs.view(-1, self.emb_size // self.block_size, self.block_size)\n b2 = ts.view(-1, self.emb_size // self.block_size, self.block_size)\n\n bl = (b1.unsqueeze(3) * b2.unsqueeze(2)).view(-1, self.emb_size * self.block_size)\n logits = self.bilinear(bl)\n\n return logits\n\n def forward_evi(self, doc_attn, sent_pos, batch_rel, offset):\n max_sent_num = max([len(sent) for sent in sent_pos])\n rel_sent_attn = []\n for i in range(len(sent_pos)): # for each batch\n # the relation ids corresponds to document in batch i is [sum(batch_rel[:i]), sum(batch_rel[:i+1]))\n curr_attn = doc_attn[sum(batch_rel[:i]):sum(batch_rel[:i + 1])]\n curr_sent_pos = [torch.arange(s[0], s[1]).to(curr_attn.device) + offset for s in sent_pos[i]] # + offset\n\n curr_attn_per_sent = [curr_attn.index_select(-1, sent) for sent in curr_sent_pos]\n curr_attn_per_sent += [torch.zeros_like(curr_attn_per_sent[0])] * (max_sent_num - len(curr_attn_per_sent))\n sum_attn = torch.stack([attn.sum(dim=-1) for attn in curr_attn_per_sent],\n dim=-1) # sum across those attentions\n rel_sent_attn.append(sum_attn)\n\n s_attn = torch.cat(rel_sent_attn, dim=0)\n return s_attn\n\n def forward(self,\n input_ids=None,\n attention_mask=None,\n labels=None, # relation labels\n entity_pos=None,\n hts=None, # entity pairs\n sent_pos=None,\n sent_labels=None, # evidence labels (0/1)\n teacher_attns=None, # evidence distribution from teacher model\n graph=None,\n tag=\"train\"\n ):\n\n offset = 1 if self.config.transformer_type in [\"bert\", \"roberta\"] else 0\n output = {}\n sequence_output, attention = self.encode(input_ids, attention_mask)\n\n hs, rs, ts, doc_attn, batch_rel = self.get_hrt(sequence_output, attention, entity_pos, hts, offset)\n\n if self.use_graph:\n h, t = self.graph(sequence_output, graph, attention, entity_pos, hts, offset)\n logits = self.forward_rel(hs, ts, rs, h, t)\n else:\n logits = self.forward_rel_no_graph(hs, ts, rs)\n\n output[\"rel_pred\"] = self.loss_fnt.get_label(logits, num_labels=self.num_labels)\n\n if sent_labels is not None: # human-annotated evidence available\n\n s_attn = self.forward_evi(doc_attn, sent_pos, batch_rel, offset)\n output[\"evi_pred\"] = F.pad(s_attn > self.evi_thresh, (0, self.max_sent_num - s_attn.shape[-1]))\n\n if tag in [\"test\", \"dev\"]: # testing\n scores_topk = self.loss_fnt.get_score(logits, self.num_labels)\n output[\"scores\"] = scores_topk[0]\n output[\"topks\"] = scores_topk[1]\n\n if tag == \"infer\": # teacher model inference\n output[\"attns\"] = doc_attn.split(batch_rel)\n\n else: # training\n # relation extraction loss\n loss = self.loss_fnt(logits.float(), labels.float())\n output[\"loss\"] = {\"rel_loss\": loss.to(sequence_output)}\n\n if sent_labels is not None: # supervised training with human evidence\n\n idx_used = torch.nonzero(labels[:, 1:].sum(dim=-1)).view(-1)\n # evidence retrieval loss (kldiv loss)\n s_attn = s_attn[idx_used]\n sent_labels = sent_labels[idx_used]\n norm_s_labels = sent_labels / (sent_labels.sum(dim=-1, keepdim=True) + 
1e-30)\n norm_s_labels[norm_s_labels == 0] = 1e-30\n s_attn[s_attn == 0] = 1e-30\n evi_loss = self.loss_fnt_evi(s_attn.log(), norm_s_labels)\n output[\"loss\"][\"evi_loss\"] = evi_loss.to(sequence_output)\n\n elif teacher_attns is not None: # self training with teacher attention\n\n doc_attn[doc_attn == 0] = 1e-30\n teacher_attns[teacher_attns == 0] = 1e-30\n attn_loss = self.loss_fnt_evi(doc_attn.log(), teacher_attns)\n output[\"loss\"][\"attn_loss\"] = attn_loss.to(sequence_output)\n\n return output" }, { "identifier": "set_seed", "path": "utils.py", "snippet": "def set_seed(args):\n seed = int(args.seed)\n random.seed(seed)\n os.environ['PYTHONHASHSEED'] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'\n torch.use_deterministic_algorithms(True)" }, { "identifier": "collate_fn", "path": "utils.py", "snippet": "def collate_fn(batch):\n max_len = max([len(f[\"input_ids\"]) for f in batch])\n max_sent = max([len(f[\"sent_pos\"]) for f in batch])\n input_ids = [f[\"input_ids\"] + [0] * (max_len - len(f[\"input_ids\"])) for f in batch]\n input_mask = [[1.0] * len(f[\"input_ids\"]) + [0.0] * (max_len - len(f[\"input_ids\"])) for f in batch]\n labels = [f[\"labels\"] for f in batch]\n entity_pos = [f[\"entity_pos\"] for f in batch]\n hts = [f[\"hts\"] for f in batch]\n sent_pos = [f[\"sent_pos\"] for f in batch]\n sent_labels = [f[\"sent_labels\"] for f in batch if \"sent_labels\" in f]\n attns = [f[\"attns\"] for f in batch if \"attns\" in f]\n\n input_ids = torch.tensor(input_ids, dtype=torch.long)\n input_mask = torch.tensor(input_mask, dtype=torch.float)\n\n labels = [torch.tensor(label) for label in labels]\n labels = torch.cat(labels, dim=0)\n\n if sent_labels != [] and None not in sent_labels:\n sent_labels_tensor = []\n for sent_label in sent_labels:\n sent_label = np.array(sent_label)\n sent_labels_tensor.append(np.pad(sent_label, ((0, 0), (0, max_sent - sent_label.shape[1]))))\n sent_labels_tensor = torch.from_numpy(np.concatenate(sent_labels_tensor, axis=0))\n else:\n sent_labels_tensor = None\n\n if attns:\n attns = [np.pad(attn, ((0, 0), (0, max_len - attn.shape[1]))) for attn in attns]\n attns = torch.from_numpy(np.concatenate(attns, axis=0))\n else:\n attns = None\n\n graph = [f[\"graph\"] for f in batch]\n\n output = (input_ids, input_mask, labels, entity_pos, hts, sent_pos, sent_labels_tensor, attns, graph)\n\n return output" }, { "identifier": "create_directory", "path": "utils.py", "snippet": "def create_directory(d):\n if d and not os.path.exists(d):\n os.makedirs(d)\n return d" }, { "identifier": "read_docred", "path": "prepro.py", "snippet": "def read_docred(file_in,\n tokenizer,\n transformer_type=\"bert\",\n max_seq_length=1024,\n teacher_sig_path=\"\",\n single_results=None):\n\n i_line = 0\n pos_samples = 0\n neg_samples = 0\n features = []\n\n if file_in == \"\":\n return None\n\n with open(file_in, \"r\", encoding='utf-8') as fh:\n data = json.load(fh)\n\n if teacher_sig_path != \"\": # load logits\n basename = os.path.splitext(os.path.basename(file_in))[0]\n attns_file = os.path.join(teacher_sig_path, f\"{basename}.attns\")\n attns = pickle.load(open(attns_file, 'rb'))\n\n if single_results != None:\n # reorder predictions as relations by title\n pred_pos_samples = 0\n pred_neg_samples = 0\n pred_rels = single_results\n title2preds = {}\n for pred_rel in 
pred_rels:\n if pred_rel[\"title\"] in title2preds:\n title2preds[pred_rel[\"title\"]].append(pred_rel)\n else:\n title2preds[pred_rel[\"title\"]] = [pred_rel]\n\n for doc_id in tqdm(range(len(data)), desc=\"Loading examples\"):\n\n sample = data[doc_id]\n entities = sample['vertexSet']\n entity_start, entity_end = [], []\n # record entities\n for entity in entities:\n for mention in entity:\n sent_id = mention[\"sent_id\"]\n pos = mention[\"pos\"]\n entity_start.append((sent_id, pos[0],))\n entity_end.append((sent_id, pos[1] - 1,))\n\n # add entity markers\n sents, sent_map, sent_pos = add_entity_markers(sample, tokenizer, entity_start, entity_end)\n\n # training triples with positive examples (entity pairs with labels)\n train_triple = {}\n\n if \"labels\" in sample:\n for label in sample['labels']:\n evidence = label['evidence']\n r = int(docred_rel2id[label['r']])\n\n # update training triples\n if (label['h'], label['t']) not in train_triple:\n train_triple[(label['h'], label['t'])] = [\n {'relation': r, 'evidence': evidence}]\n else:\n train_triple[(label['h'], label['t'])].append(\n {'relation': r, 'evidence': evidence})\n\n # get anaphors in the doc\n mentions = set([m['name'] for e in entities for m in e])\n\n potential_mention = get_anaphors(sample['sents'], mentions)\n\n entities.append(potential_mention)\n\n # entity start, end position\n entity_pos = []\n\n for e in entities:\n entity_pos.append([])\n for m in e:\n start = sent_map[m[\"sent_id\"]][m[\"pos\"][0]]\n end = sent_map[m[\"sent_id\"]][m[\"pos\"][1]]\n label = m[\"type\"]\n entity_pos[-1].append((start, end,))\n\n relations, hts, sent_labels = [], [], []\n\n for h, t in train_triple.keys(): # for every entity pair with gold relation\n relation = [0] * len(docred_rel2id)\n sent_evi = [0] * len(sent_pos)\n\n for mention in train_triple[h, t]: # for each relation mention with head h and tail t\n relation[mention[\"relation\"]] = 1\n for i in mention[\"evidence\"]:\n sent_evi[i] += 1\n\n relations.append(relation)\n hts.append([h, t])\n sent_labels.append(sent_evi)\n pos_samples += 1\n\n for h in range(len(entities) - 1):\n for t in range(len(entities) - 1):\n # all entity pairs that do not have relation are treated as negative samples\n if h != t and [h, t] not in hts: # and [t, h] not in hts:\n relation = [1] + [0] * (len(docred_rel2id) - 1)\n sent_evi = [0] * len(sent_pos)\n relations.append(relation)\n\n hts.append([h, t])\n sent_labels.append(sent_evi)\n neg_samples += 1\n\n graph = create_graph(entity_pos)\n\n assert len(relations) == (len(entities) - 1) * (len(entities) - 2)\n assert len(sents) < max_seq_length\n sents = sents[:max_seq_length - 2] # truncate, -2 for [CLS] and [SEP]\n input_ids = tokenizer.convert_tokens_to_ids(sents)\n input_ids = tokenizer.build_inputs_with_special_tokens(input_ids)\n\n feature = [{'input_ids': input_ids,\n 'entity_pos': entity_pos if entity_pos[-1] != [] else entity_pos[:-1],\n 'labels': relations,\n 'hts': hts,\n 'sent_pos': sent_pos,\n 'sent_labels': sent_labels,\n 'title': sample['title'],\n 'graph': graph\n }]\n\n if teacher_sig_path != '': # add evidence distributions from the teacher model\n feature[0]['attns'] = attns[doc_id][:, :len(input_ids)]\n\n if single_results is not None: # get pseudo documents from predictions of the single run\n offset = 1 if transformer_type in [\"bert\", \"roberta\"] else 0\n if sample[\"title\"] in title2preds:\n feature, pos_sample, neg_sample, = get_pseudo_features(feature[0], title2preds[sample[\"title\"]],\n entities, sent_map, offset, 
tokenizer)\n pred_pos_samples += pos_sample\n pred_neg_samples += neg_sample\n\n i_line += len(feature)\n features.extend(feature)\n\n print(\"# of documents {}.\".format(i_line))\n if single_results is not None:\n print(\"# of positive examples {}.\".format(pred_pos_samples))\n print(\"# of negative examples {}.\".format(pred_neg_samples))\n\n else:\n print(\"# of positive examples {}.\".format(pos_samples))\n print(\"# of negative examples {}.\".format(neg_samples))\n\n return features" }, { "identifier": "to_official", "path": "evaluation.py", "snippet": "def to_official(preds: list, features: list, evi_preds: list = [], scores: list = [], topks: list = []):\n '''\n Convert the predictions to official format for evaluating.\n Input:\n :preds: list of dictionaries, each dictionary entry is a predicted relation triple from the original document. Keys: ['title', 'h_idx', 't_idx', 'r', 'evidence', 'score'].\n :features: list of features within each document. Identical to the lists obtained from pre-processing.\n :evi_preds: list of the evidence prediction corresponding to each relation triple prediction.\n :scores: list of scores of topk relation labels for each entity pair.\n :topks: list of topk relation labels for each entity pair.\n Output:\n :official_res: official results used for evaluation.\n :res: topk results to be dumped into file, which can be further used during fushion.\n '''\n\n h_idx, t_idx, title, sents = [], [], [], []\n\n for f in features:\n if \"entity_map\" in f:\n hts = [[f[\"entity_map\"][ht[0]], f[\"entity_map\"][ht[1]]] for ht in f[\"hts\"]]\n else:\n hts = f[\"hts\"]\n\n h_idx += [ht[0] for ht in hts]\n t_idx += [ht[1] for ht in hts]\n title += [f[\"title\"] for ht in hts]\n sents += [len(f[\"sent_pos\"])] * len(hts)\n\n official_res = []\n res = []\n\n for i in range(preds.shape[0]): # for each entity pair\n if scores != []:\n score = extract_relative_score(scores[i], topks[i])\n pred = topks[i]\n else:\n pred = preds[i]\n pred = np.nonzero(pred)[0].tolist()\n\n for p in pred: # for each predicted relation label (topk)\n curr_result = {\n 'title': title[i],\n 'h_idx': h_idx[i],\n 't_idx': t_idx[i],\n 'r': id2rel[p],\n }\n if evi_preds != []:\n curr_evi = evi_preds[i]\n evis = np.nonzero(curr_evi)[0].tolist()\n curr_result[\"evidence\"] = [evi for evi in evis if evi < sents[i]]\n if scores != []:\n curr_result[\"score\"] = score[np.where(topks[i] == p)].item()\n if p != 0 and p in np.nonzero(preds[i])[0].tolist():\n official_res.append(curr_result)\n res.append(curr_result)\n\n return official_res, res" }, { "identifier": "official_evaluate", "path": "evaluation.py", "snippet": "def official_evaluate(tmp, path, train_file=\"train_annotated.json\", dev_file=\"dev.json\"):\n '''\n Adapted from the official evaluation code\n '''\n truth_dir = os.path.join(path, 'ref')\n\n if not os.path.exists(truth_dir):\n os.makedirs(truth_dir)\n\n fact_in_train_annotated = gen_train_facts(os.path.join(path, train_file), truth_dir)\n fact_in_train_distant = gen_train_facts(os.path.join(path, \"train_distant.json\"), truth_dir)\n\n truth = json.load(open(os.path.join(path, dev_file)))\n\n std = {}\n tot_evidences = 0\n titleset = set([])\n\n title2vectexSet = {}\n\n for x in truth:\n title = x['title']\n titleset.add(title)\n\n vertexSet = x['vertexSet']\n title2vectexSet[title] = vertexSet\n\n if 'labels' not in x: # official test set from DocRED\n continue\n\n for label in x['labels']:\n r = label['r']\n h_idx = label['h']\n t_idx = label['t']\n std[(title, r, h_idx, t_idx)] = 
set(label['evidence'])\n tot_evidences += len(label['evidence'])\n\n tot_relations = len(std)\n tmp.sort(key=lambda x: (x['title'], x['h_idx'], x['t_idx'], x['r']))\n submission_answer = [tmp[0]]\n\n for i in range(1, len(tmp)):\n x = tmp[i]\n y = tmp[i - 1]\n if (x['title'], x['h_idx'], x['t_idx'], x['r']) != (y['title'], y['h_idx'], y['t_idx'], y['r']):\n submission_answer.append(tmp[i])\n\n correct_re = 0\n correct_evidence = 0\n pred_evi = 0\n\n correct_in_train_annotated = 0\n correct_in_train_distant = 0\n titleset2 = set([])\n for x in submission_answer:\n title = x['title']\n h_idx = x['h_idx']\n t_idx = x['t_idx']\n r = x['r']\n titleset2.add(title)\n if title not in title2vectexSet:\n continue\n vertexSet = title2vectexSet[title]\n\n if 'evidence' in x: # and (title, h_idx, t_idx) in std:\n evi = set(x['evidence'])\n else:\n evi = set([])\n pred_evi += len(evi)\n\n if (title, r, h_idx, t_idx) in std:\n correct_re += 1\n stdevi = std[(title, r, h_idx, t_idx)]\n correct_evidence += len(stdevi & evi)\n in_train_annotated = in_train_distant = False\n for n1 in vertexSet[h_idx]:\n for n2 in vertexSet[t_idx]:\n if (n1['name'], n2['name'], r) in fact_in_train_annotated:\n in_train_annotated = True\n if (n1['name'], n2['name'], r) in fact_in_train_distant:\n in_train_distant = True\n\n if in_train_annotated:\n correct_in_train_annotated += 1\n if in_train_distant:\n correct_in_train_distant += 1\n\n re_p = 1.0 * correct_re / len(submission_answer)\n re_r = 1.0 * correct_re / tot_relations if tot_relations != 0 else 0\n if re_p + re_r == 0:\n re_f1 = 0\n else:\n re_f1 = 2.0 * re_p * re_r / (re_p + re_r)\n\n evi_p = 1.0 * correct_evidence / pred_evi if pred_evi > 0 else 0\n evi_r = 1.0 * correct_evidence / tot_evidences if tot_evidences > 0 else 0\n\n if evi_p + evi_r == 0:\n evi_f1 = 0\n else:\n evi_f1 = 2.0 * evi_p * evi_r / (evi_p + evi_r)\n\n re_p_ignore_train_annotated = 1.0 * (correct_re - correct_in_train_annotated) / (\n len(submission_answer) - correct_in_train_annotated + 1e-5)\n re_p_ignore_train = 1.0 * (correct_re - correct_in_train_distant) / (\n len(submission_answer) - correct_in_train_distant + 1e-5)\n\n if re_p_ignore_train_annotated + re_r == 0:\n re_f1_ignore_train_annotated = 0\n else:\n re_f1_ignore_train_annotated = 2.0 * re_p_ignore_train_annotated * re_r / (re_p_ignore_train_annotated + re_r)\n\n if re_p_ignore_train + re_r == 0:\n re_f1_ignore_train = 0\n else:\n re_f1_ignore_train = 2.0 * re_p_ignore_train * re_r / (re_p_ignore_train + re_r)\n\n return [re_p, re_r, re_f1], [evi_p, evi_r, evi_f1], \\\n [re_p_ignore_train_annotated, re_r, re_f1_ignore_train_annotated], \\\n [re_p_ignore_train, re_r, re_f1_ignore_train]" }, { "identifier": "merge_results", "path": "evaluation.py", "snippet": "def merge_results(pred: list, pred_pseudo: list, features: list, thresh: float = None):\n '''\n Merge relation predictions from the original document and psuedo documents.\n Input:\n :pred: list of dictionaries, each dictionary entry is a predicted relation triple from the original document. Keys: ['title', 'h_idx', 't_idx', 'r', 'evidence', 'score'].\n :pred_pseudo: list of dictionaries, each dictionary entry is a predicted relation triple from pseudo documents. Keys: ['title', 'h_idx', 't_idx', 'r', 'evidence', 'score'].\n :features: list of features within each document. Identical to the lists obtained from pre-processing.\n :thresh: threshold for selecting predictions.\n Output:\n :merged_res: list of merged relation predictions. 
Each relation prediction is a dictionay with keys (title, h_idx, t_idx, r).\n :thresh: threshold of selecting relation predictions.\n '''\n\n title2pred = get_title2pred(pred)\n title2pred_pseudo = get_title2pred(pred_pseudo)\n\n title2gt = get_title2gt(features)\n num_gt = sum([len(title2gt[t]) for t in title2gt])\n\n titles = list(title2pred.keys())\n cand = []\n merged_res = []\n correct, num_pred = 0, 0\n\n for t in titles:\n rels = title2pred[t]\n rels_pseudo = title2pred_pseudo[t] if t in title2pred_pseudo else {}\n\n union = set(rels.keys()) | set(rels_pseudo.keys())\n for r in union:\n if r in rels and r in rels_pseudo: # add those into predictions\n if rels[r] > 0 and rels_pseudo[r] > 0:\n merged_res.append({'title': t, 'h_idx': r[0], 't_idx': r[1], 'r': r[2]})\n num_pred += 1\n correct += r in title2gt[t]\n continue\n score = rels[r] + rels_pseudo[r]\n elif r in rels: # -10 for penalty\n score = rels[r] - 10\n elif r in rels_pseudo:\n score = rels_pseudo[r] - 10\n cand.append((r in title2gt[t], score, t, r[0], r[1], r[2]))\n\n if thresh != None:\n sorted_pred = sorted(cand, key=lambda x: x[1], reverse=True)\n last = min(filter(lambda x: x[1] > thresh, sorted_pred))\n until = sorted_pred.index(last)\n cand = sorted_pred[:until + 1]\n merged_res.extend([{'title': r[2], 'h_idx': r[3], 't_idx': r[4], 'r': r[5]} for r in cand])\n return merged_res, thresh\n\n if cand != []:\n thresh, cand = select_thresh(cand, num_gt, correct, num_pred)\n merged_res.extend([{'title': r[2], 'h_idx': r[3], 't_idx': r[4], 'r': r[5]} for r in cand])\n\n return merged_res, thresh" } ]
import argparse
import os
import numpy as np
import torch
import ujson as json
import pandas as pd
import pickle
from torch.cuda.amp import GradScaler
from torch.utils.data import DataLoader
from transformers import AutoConfig, AutoModel, AutoTokenizer
from transformers.optimization import AdamW, get_linear_schedule_with_warmup
from args import add_args
from model import DocREModel
from utils import set_seed, collate_fn, create_directory
from prepro import read_docred
from evaluation import to_official, official_evaluate, merge_results
from tqdm import tqdm
10,826
for epoch in tqdm(train_iterator, desc='Train epoch'): for step, batch in enumerate(train_dataloader): model.zero_grad() optimizer.zero_grad() model.train() inputs = load_input(batch, args.device) outputs = model(**inputs) loss = [outputs["loss"]["rel_loss"]] if inputs["sent_labels"] is not None: loss.append(outputs["loss"]["evi_loss"] * args.evi_lambda) if inputs["teacher_attns"] is not None: loss.append(outputs["loss"]["attn_loss"] * args.attn_lambda) loss = sum(loss) / args.gradient_accumulation_steps scaler.scale(loss).backward() if step % args.gradient_accumulation_steps == 0: if args.max_grad_norm > 0: scaler.unscale_(optimizer) torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) scaler.step(optimizer) scaler.update() scheduler.step() model.zero_grad() num_steps += 1 if (step + 1) == len(train_dataloader) or ( args.evaluation_steps > 0 and num_steps % args.evaluation_steps == 0 and step % args.gradient_accumulation_steps == 0): dev_scores, dev_output, official_results, results = evaluate(args, model, dev_features, tag="dev") print(dev_output) if dev_scores["dev_F1_ign"] > best_score: best_score = dev_scores["dev_F1_ign"] best_offi_results = official_results best_results = results best_output = dev_output ckpt_file = os.path.join(args.save_path, "best.ckpt") print(f"saving model checkpoint into {ckpt_file} ...") torch.save(model.state_dict(), ckpt_file) if epoch == train_iterator[-1]: # last epoch ckpt_file = os.path.join(args.save_path, "last.ckpt") print(f"saving model checkpoint into {ckpt_file} ...") torch.save(model.state_dict(), ckpt_file) pred_file = os.path.join(args.save_path, args.pred_file) score_file = os.path.join(args.save_path, "scores.csv") results_file = os.path.join(args.save_path, f"topk_{args.pred_file}") dump_to_file(best_offi_results, pred_file, best_output, score_file, best_results, results_file) return num_steps new_layer = ["extractor", "bilinear", "graph"] optimizer_grouped_parameters = [ {"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in new_layer)], }, {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in new_layer)], "lr": args.lr_added}, ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.lr_transformer, eps=args.adam_epsilon) num_steps = 0 set_seed(args) model.zero_grad() finetune(train_features, optimizer, args.num_train_epochs, num_steps) def evaluate(args, model, features, tag="dev"): dataloader = DataLoader(features, batch_size=args.test_batch_size, shuffle=False, collate_fn=collate_fn, drop_last=False) preds, evi_preds = [], [] scores, topks = [], [] attns = [] for batch in dataloader: model.eval() if args.save_attn: tag = "infer" inputs = load_input(batch, args.device, tag) with torch.no_grad(): outputs = model(**inputs) pred = outputs["rel_pred"] pred = pred.cpu().numpy() pred[np.isnan(pred)] = 0 preds.append(pred) if "scores" in outputs: scores.append(outputs["scores"].cpu().numpy()) topks.append(outputs["topks"].cpu().numpy()) if "evi_pred" in outputs: # relation extraction and evidence extraction evi_pred = outputs["evi_pred"] evi_pred = evi_pred.cpu().numpy() evi_preds.append(evi_pred) if "attns" in outputs: # attention recorded attn = outputs["attns"] attns.extend([a.cpu().numpy() for a in attn]) preds = np.concatenate(preds, axis=0) if scores: scores = np.concatenate(scores, axis=0) topks = np.concatenate(topks, axis=0) if evi_preds: evi_preds = np.concatenate(evi_preds, axis=0) official_results, results = to_official(preds, features, evi_preds=evi_preds, 
scores=scores, topks=topks) if len(official_results) > 0: if tag == "test":
def load_input(batch, device, tag="dev"): input = {'input_ids': batch[0].to(device), 'attention_mask': batch[1].to(device), 'labels': batch[2].to(device), 'entity_pos': batch[3], 'hts': batch[4], 'sent_pos': batch[5], 'sent_labels': batch[6].to(device) if (not batch[6] is None) and (batch[7] is None) else None, 'teacher_attns': batch[7].to(device) if not batch[7] is None else None, 'graph': batch[8], 'tag': tag } return input def train(args, model, train_features, dev_features): def finetune(features, optimizer, num_epoch, num_steps): best_score = -1 train_dataloader = DataLoader(features, batch_size=args.train_batch_size, shuffle=True, collate_fn=collate_fn, drop_last=True) train_iterator = range(int(num_epoch)) total_steps = int(len(train_dataloader) * num_epoch // args.gradient_accumulation_steps) warmup_steps = int(total_steps * args.warmup_ratio) scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=total_steps) scaler = GradScaler() print("Total steps: {}".format(total_steps)) print("Warmup steps: {}".format(warmup_steps)) for epoch in tqdm(train_iterator, desc='Train epoch'): for step, batch in enumerate(train_dataloader): model.zero_grad() optimizer.zero_grad() model.train() inputs = load_input(batch, args.device) outputs = model(**inputs) loss = [outputs["loss"]["rel_loss"]] if inputs["sent_labels"] is not None: loss.append(outputs["loss"]["evi_loss"] * args.evi_lambda) if inputs["teacher_attns"] is not None: loss.append(outputs["loss"]["attn_loss"] * args.attn_lambda) loss = sum(loss) / args.gradient_accumulation_steps scaler.scale(loss).backward() if step % args.gradient_accumulation_steps == 0: if args.max_grad_norm > 0: scaler.unscale_(optimizer) torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) scaler.step(optimizer) scaler.update() scheduler.step() model.zero_grad() num_steps += 1 if (step + 1) == len(train_dataloader) or ( args.evaluation_steps > 0 and num_steps % args.evaluation_steps == 0 and step % args.gradient_accumulation_steps == 0): dev_scores, dev_output, official_results, results = evaluate(args, model, dev_features, tag="dev") print(dev_output) if dev_scores["dev_F1_ign"] > best_score: best_score = dev_scores["dev_F1_ign"] best_offi_results = official_results best_results = results best_output = dev_output ckpt_file = os.path.join(args.save_path, "best.ckpt") print(f"saving model checkpoint into {ckpt_file} ...") torch.save(model.state_dict(), ckpt_file) if epoch == train_iterator[-1]: # last epoch ckpt_file = os.path.join(args.save_path, "last.ckpt") print(f"saving model checkpoint into {ckpt_file} ...") torch.save(model.state_dict(), ckpt_file) pred_file = os.path.join(args.save_path, args.pred_file) score_file = os.path.join(args.save_path, "scores.csv") results_file = os.path.join(args.save_path, f"topk_{args.pred_file}") dump_to_file(best_offi_results, pred_file, best_output, score_file, best_results, results_file) return num_steps new_layer = ["extractor", "bilinear", "graph"] optimizer_grouped_parameters = [ {"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in new_layer)], }, {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in new_layer)], "lr": args.lr_added}, ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.lr_transformer, eps=args.adam_epsilon) num_steps = 0 set_seed(args) model.zero_grad() finetune(train_features, optimizer, args.num_train_epochs, num_steps) def evaluate(args, model, features, tag="dev"): dataloader = 
DataLoader(features, batch_size=args.test_batch_size, shuffle=False, collate_fn=collate_fn, drop_last=False) preds, evi_preds = [], [] scores, topks = [], [] attns = [] for batch in dataloader: model.eval() if args.save_attn: tag = "infer" inputs = load_input(batch, args.device, tag) with torch.no_grad(): outputs = model(**inputs) pred = outputs["rel_pred"] pred = pred.cpu().numpy() pred[np.isnan(pred)] = 0 preds.append(pred) if "scores" in outputs: scores.append(outputs["scores"].cpu().numpy()) topks.append(outputs["topks"].cpu().numpy()) if "evi_pred" in outputs: # relation extraction and evidence extraction evi_pred = outputs["evi_pred"] evi_pred = evi_pred.cpu().numpy() evi_preds.append(evi_pred) if "attns" in outputs: # attention recorded attn = outputs["attns"] attns.extend([a.cpu().numpy() for a in attn]) preds = np.concatenate(preds, axis=0) if scores: scores = np.concatenate(scores, axis=0) topks = np.concatenate(topks, axis=0) if evi_preds: evi_preds = np.concatenate(evi_preds, axis=0) official_results, results = to_official(preds, features, evi_preds=evi_preds, scores=scores, topks=topks) if len(official_results) > 0: if tag == "test":
best_re, best_evi, best_re_ign, _ = official_evaluate(official_results, args.data_dir, args.train_file,
7
2023-10-20 05:53:25+00:00
16k
xingchenshanyao/YOLOP-E
lib/core/function.py
[ { "identifier": "ConfusionMatrix", "path": "lib/core/evaluate.py", "snippet": "class ConfusionMatrix:\n # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix\n def __init__(self, nc=1, conf=0.25, iou_thres=0.45):\n nc = 10 # 20230904 nc是类别数\n self.matrix = np.zeros((nc + 1, nc + 1))\n # import pdb;pdb.set_trace()\n self.nc = nc # number of classes\n self.conf = conf\n self.iou_thres = iou_thres\n\n def process_batch(self, detections, labels):\n \"\"\"\n Return intersection-over-union (Jaccard index) of boxes.\n Both sets of boxes are expected to be in (x1, y1, x2, y2) format.\n Arguments:\n detections (Array[N, 6]), x1, y1, x2, y2, conf, class\n labels (Array[M, 5]), class, x1, y1, x2, y2\n Returns:\n None, updates confusion matrix accordingly\n \"\"\"\n detections = detections[detections[:, 4] > self.conf]\n gt_classes = labels[:, 0].int()\n detection_classes = detections[:, 5].int()\n iou = general.box_iou(labels[:, 1:], detections[:, :4])\n\n x = torch.where(iou > self.iou_thres)\n # import pdb;pdb.set_trace()\n if x[0].shape[0]:\n matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()\n if x[0].shape[0] > 1:\n matches = matches[matches[:, 2].argsort()[::-1]]\n matches = matches[np.unique(matches[:, 1], return_index=True)[1]]\n matches = matches[matches[:, 2].argsort()[::-1]]\n matches = matches[np.unique(matches[:, 0], return_index=True)[1]]\n else:\n matches = np.zeros((0, 3))\n\n n = matches.shape[0] > 0\n m0, m1, _ = matches.transpose().astype(np.int16)\n for i, gc in enumerate(gt_classes):\n j = m0 == i\n if n and sum(j) == 1:\n self.matrix[gc, detection_classes[m1[j]]] += 1 # correct\n else:\n # import pdb;pdb.set_trace()\n self.matrix[gc, self.nc] += 1 # background FP\n\n if n:\n for i, dc in enumerate(detection_classes):\n if not any(m1 == i):\n self.matrix[self.nc, dc] += 1 # background FN\n\n def matrix(self):\n return self.matrix\n\n def plot(self, save_dir='', names=()):\n try:\n import seaborn as sn\n\n array = self.matrix / (self.matrix.sum(0).reshape(1, self.nc + 1) + 1E-6) # normalize\n array[array < 0.005] = np.nan # don't annotate (would appear as 0.00)\n\n fig = plt.figure(figsize=(12, 9), tight_layout=True)\n sn.set(font_scale=1.0 if self.nc < 50 else 0.8) # for label size\n labels = (0 < len(names) < 99) and len(names) == self.nc # apply names to ticklabels\n sn.heatmap(array, annot=self.nc < 30, annot_kws={\"size\": 8}, cmap='Blues', fmt='.2f', square=True,\n xticklabels=names + ['background FN'] if labels else \"auto\",\n yticklabels=names + ['background FP'] if labels else \"auto\").set_facecolor((1, 1, 1))\n fig.axes[0].set_xlabel('True')\n fig.axes[0].set_ylabel('Predicted')\n fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250)\n except Exception as e:\n pass\n\n def print(self):\n for i in range(self.nc + 1):\n print(' '.join(map(str, self.matrix[i])))" }, { "identifier": "SegmentationMetric", "path": "lib/core/evaluate.py", "snippet": "class SegmentationMetric(object):\n '''\n imgLabel [batch_size, height(144), width(256)]\n confusionMatrix [[0(TN),1(FP)],\n [2(FN),3(TP)]]\n '''\n def __init__(self, numClass):\n self.numClass = numClass\n self.confusionMatrix = np.zeros((self.numClass,)*2)\n\n def pixelAccuracy(self):\n # return all class overall pixel accuracy\n # acc = (TP + TN) / (TP + TN + FP + TN)\n acc = np.diag(self.confusionMatrix).sum() / self.confusionMatrix.sum()\n return acc\n \n def lineAccuracy(self):\n Acc = np.diag(self.confusionMatrix) / (self.confusionMatrix.sum(axis=1) + 
1e-12)\n return Acc[1]\n\n def classPixelAccuracy(self):\n # return each category pixel accuracy(A more accurate way to call it precision)\n # acc = (TP) / TP + FP\n classAcc = np.diag(self.confusionMatrix) / (self.confusionMatrix.sum(axis=0) + 1e-12)\n return classAcc\n\n def meanPixelAccuracy(self):\n classAcc = self.classPixelAccuracy()\n meanAcc = np.nanmean(classAcc)\n return meanAcc\n\n def meanIntersectionOverUnion(self):\n # Intersection = TP Union = TP + FP + FN\n # IoU = TP / (TP + FP + FN)\n intersection = np.diag(self.confusionMatrix)\n union = np.sum(self.confusionMatrix, axis=1) + np.sum(self.confusionMatrix, axis=0) - np.diag(self.confusionMatrix)\n IoU = intersection / union\n IoU[np.isnan(IoU)] = 0\n mIoU = np.nanmean(IoU)\n return mIoU\n \n def IntersectionOverUnion(self):\n intersection = np.diag(self.confusionMatrix)\n union = np.sum(self.confusionMatrix, axis=1) + np.sum(self.confusionMatrix, axis=0) - np.diag(self.confusionMatrix)\n IoU = intersection / union\n IoU[np.isnan(IoU)] = 0\n return IoU[1]\n\n def genConfusionMatrix(self, imgPredict, imgLabel):\n # remove classes from unlabeled pixels in gt image and predict\n # print(imgLabel.shape)\n mask = (imgLabel >= 0) & (imgLabel < self.numClass)\n label = self.numClass * imgLabel[mask] + imgPredict[mask]\n count = np.bincount(label, minlength=self.numClass**2)\n confusionMatrix = count.reshape(self.numClass, self.numClass)\n return confusionMatrix\n\n def Frequency_Weighted_Intersection_over_Union(self):\n # FWIOU = [(TP+FN)/(TP+FP+TN+FN)] *[TP / (TP + FP + FN)]\n freq = np.sum(self.confusionMatrix, axis=1) / np.sum(self.confusionMatrix)\n iu = np.diag(self.confusionMatrix) / (\n np.sum(self.confusionMatrix, axis=1) + np.sum(self.confusionMatrix, axis=0) -\n np.diag(self.confusionMatrix))\n FWIoU = (freq[freq > 0] * iu[freq > 0]).sum()\n return FWIoU\n\n\n def addBatch(self, imgPredict, imgLabel):\n assert imgPredict.shape == imgLabel.shape\n self.confusionMatrix += self.genConfusionMatrix(imgPredict, imgLabel)\n\n def reset(self):\n self.confusionMatrix = np.zeros((self.numClass, self.numClass))" }, { "identifier": "non_max_suppression", "path": "lib/core/general.py", "snippet": "def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, labels=()):\n \"\"\"Performs Non-Maximum Suppression (NMS) on inference results\n\n Returns:\n detections with shape: nx6 (x1, y1, x2, y2, conf, cls)\n \"\"\"\n\n nc = prediction.shape[2] - 5 # number of classes\n xc = prediction[..., 4] > conf_thres # candidates\n\n # Settings\n min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height\n max_det = 300 # maximum number of detections per image\n max_nms = 30000 # maximum number of boxes into torchvision.ops.nms()\n time_limit = 10.0 # seconds to quit after\n redundant = True # require redundant detections\n multi_label = nc > 1 # multiple labels per box (adds 0.5ms/img)\n merge = False # use merge-NMS\n\n t = time.time()\n output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]\n for xi, x in enumerate(prediction): # image index, image inference\n # Apply constraints\n # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height\n x = x[xc[xi]] # confidence\n\n # Cat apriori labels if autolabelling\n if labels and len(labels[xi]):\n l = labels[xi]\n v = torch.zeros((len(l), nc + 5), device=x.device)\n v[:, :4] = l[:, 1:5] # box\n v[:, 4] = 1.0 # conf\n v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls\n x = torch.cat((x, v), 0)\n\n 
# If none remain process next image\n if not x.shape[0]:\n continue\n\n # Compute conf\n x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf\n\n # Box (center x, center y, width, height) to (x1, y1, x2, y2)\n box = xywh2xyxy(x[:, :4])\n\n # Detections matrix nx6 (xyxy, conf, cls)\n if multi_label:\n i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T\n x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)\n else: # best class only\n conf, j = x[:, 5:].max(1, keepdim=True)\n x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]\n\n # Filter by class\n if classes is not None:\n x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]\n\n # Apply finite constraint\n # if not torch.isfinite(x).all():\n # x = x[torch.isfinite(x).all(1)]\n\n # Check shape\n n = x.shape[0] # number of boxes\n if not n: # no boxes\n continue\n elif n > max_nms: # excess boxes\n x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence\n\n # Batched NMS\n c = x[:, 5:6] * (0 if agnostic else max_wh) # classes\n boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores\n i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS\n if i.shape[0] > max_det: # limit detections\n i = i[:max_det]\n if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean)\n # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)\n iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix\n weights = iou * scores[None] # box weights\n x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes\n if redundant:\n i = i[iou.sum(1) > 1] # require redundancy\n\n output[xi] = x[i]\n if (time.time() - t) > time_limit:\n print(f'WARNING: NMS time limit {time_limit}s exceeded')\n break # time limit exceeded\n\n return output" }, { "identifier": "check_img_size", "path": "lib/core/general.py", "snippet": "def check_img_size(img_size, s=32):\n # Verify img_size is a multiple of stride s\n new_size = make_divisible(img_size, int(s)) # ceil gs-multiple # new_size = 640\n if new_size != img_size:\n print('WARNING: --img-size %g must be multiple of max stride %g, updating to %g' % (img_size, s, new_size))\n return new_size" }, { "identifier": "scale_coords", "path": "lib/core/general.py", "snippet": "def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):\n # Rescale coords (xyxy) from img1_shape to img0_shape\n if ratio_pad is None: # calculate from img0_shape\n gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new\n pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding\n else:\n gain = ratio_pad[0][0]\n pad = ratio_pad[1]\n\n coords[:, [0, 2]] -= pad[0] # x padding\n coords[:, [1, 3]] -= pad[1] # y padding\n coords[:, :4] /= gain\n clip_coords(coords, img0_shape)\n return coords" }, { "identifier": "xyxy2xywh", "path": "lib/core/general.py", "snippet": "def xyxy2xywh(x):\n # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right\n y = torch.zeros_like(x) if isinstance(x, torch.Tensor) else np.zeros_like(x)\n y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center\n y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center\n y[:, 2] = x[:, 2] - x[:, 0] # width\n y[:, 3] = x[:, 3] - x[:, 1] # height\n return y" }, { "identifier": "xywh2xyxy", "path": "lib/core/general.py", "snippet": "def xywh2xyxy(x):\n # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right\n y = torch.zeros_like(x) if 
isinstance(x, torch.Tensor) else np.zeros_like(x)\n y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x\n y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y\n y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x\n y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y\n return y" }, { "identifier": "box_iou", "path": "lib/core/general.py", "snippet": "def box_iou(box1, box2):\n # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py\n \"\"\"\n Return intersection-over-union (Jaccard index) of boxes.\n Both sets of boxes are expected to be in (x1, y1, x2, y2) format.\n Arguments:\n box1 (Tensor[N, 4])\n box2 (Tensor[M, 4])\n Returns:\n iou (Tensor[N, M]): the NxM matrix containing the pairwise\n IoU values for every element in boxes1 and boxes2\n \"\"\"\n\n def box_area(box):\n # box = 4xn\n return (box[2] - box[0]) * (box[3] - box[1]) #(x2-x1)*(y2-y1)\n\n area1 = box_area(box1.T)\n area2 = box_area(box2.T)\n\n # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)\n inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)\n return inter / (area1[:, None] + area2 - inter) # iou = inter / (area1 + area2 - inter)" }, { "identifier": "coco80_to_coco91_class", "path": "lib/core/general.py", "snippet": "def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper)\n # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/\n # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\\n')\n # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\\n')\n # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco\n # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet\n x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,\n 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,\n 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]\n return x" }, { "identifier": "plot_images", "path": "lib/core/general.py", "snippet": "def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=640, max_subplots=16):\n # Plot image grid with labels\n\n if isinstance(images, torch.Tensor):\n images = images.cpu().float().numpy()\n if isinstance(targets, torch.Tensor):\n targets = targets.cpu().numpy()\n\n # un-normalise\n if np.max(images[0]) <= 1:\n images *= 255\n\n tl = 3 # line thickness\n tf = max(tl - 1, 1) # font thickness\n bs, _, h, w = images.shape # batch size, _, height, width\n bs = min(bs, max_subplots) # limit plot images\n ns = np.ceil(bs ** 0.5) # number of subplots (square)\n\n # Check if we should resize\n scale_factor = max_size / max(h, w)\n if scale_factor < 1:\n h = math.ceil(scale_factor * h)\n w = math.ceil(scale_factor * w)\n\n colors = color_list() # list of colors\n mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init\n for i, img in enumerate(images):\n if i == max_subplots: # if last batch has fewer images than we expect\n break\n\n block_x = int(w * (i // ns))\n block_y = int(h * (i % ns))\n\n img = img.transpose(1, 2, 0)\n if scale_factor < 1:\n img = cv2.resize(img, (w, h))\n\n mosaic[block_y:block_y + h, block_x:block_x + w, :] = img\n if len(targets) > 0:\n image_targets = targets[targets[:, 0] == i]\n boxes = xywh2xyxy(image_targets[:, 2:6]).T\n classes = image_targets[:, 1].astype('int')\n labels = 
image_targets.shape[1] == 6 # labels if no conf column\n conf = None if labels else image_targets[:, 6] # check for confidence presence (label vs pred)\n\n if boxes.shape[1]:\n if boxes.max() <= 1.01: # if normalized with tolerance 0.01\n boxes[[0, 2]] *= w # scale to pixels\n boxes[[1, 3]] *= h\n elif scale_factor < 1: # absolute coords need scale if image scales\n boxes *= scale_factor\n boxes[[0, 2]] += block_x\n boxes[[1, 3]] += block_y\n for j, box in enumerate(boxes.T):\n cls = int(classes[j])\n color = colors[cls % len(colors)]\n cls = names[cls] if names else cls\n if labels or conf[j] > 0.25: # 0.25 conf thresh\n label = '%s' % cls if labels else '%s %.1f' % (cls, conf[j])\n plot_one_box(box, mosaic, label=label, color=color, line_thickness=tl)\n\n # Draw image filename labels\n if paths:\n label = Path(paths[i]).name[:40] # trim to 40 char\n t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]\n cv2.putText(mosaic, label, (block_x + 5, block_y + t_size[1] + 5), 0, tl / 3, [220, 220, 220], thickness=tf,\n lineType=cv2.LINE_AA)\n\n # Image border\n cv2.rectangle(mosaic, (block_x, block_y), (block_x + w, block_y + h), (255, 255, 255), thickness=3)\n\n if fname:\n r = min(1280. / max(h, w) / ns, 1.0) # ratio to limit image size\n mosaic = cv2.resize(mosaic, (int(ns * w * r), int(ns * h * r)), interpolation=cv2.INTER_AREA)\n # cv2.imwrite(fname, cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB)) # cv2 save\n Image.fromarray(mosaic).save(fname) # PIL save\n return mosaic" }, { "identifier": "ap_per_class", "path": "lib/core/general.py", "snippet": "def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='precision-recall_curve.png', names=[]):\n \"\"\" Compute the average precision, given the recall and precision curves.\n Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.\n # Arguments\n tp: True positives (nparray, nx1 or nx10).\n conf: Objectness value from 0-1 (nparray).\n pred_cls: Predicted object classes (nparray).\n target_cls: True object classes (nparray).\n plot: Plot precision-recall curve at [email protected]\n save_dir: Plot save directory\n # Returns\n The average precision as computed in py-faster-rcnn.\n \"\"\"\n\n # Sort by objectness\n i = np.argsort(-conf)\n tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]\n\n # Find unique classes\n unique_classes = np.unique(target_cls)\n\n # Create Precision-Recall curve and compute AP for each class\n px, py = np.linspace(0, 1, 1000), [] # for plotting\n pr_score = 0.1 # score to evaluate P and R https://github.com/ultralytics/yolov3/issues/898\n s = [unique_classes.shape[0], tp.shape[1]] # number class, number iou thresholds (i.e. 
10 for mAP0.5...0.95)\n ap, p, r = np.zeros(s), np.zeros((unique_classes.shape[0], 1000)), np.zeros((unique_classes.shape[0], 1000))\n for ci, c in enumerate(unique_classes):\n i = pred_cls == c\n n_l = (target_cls == c).sum() # number of labels\n n_p = i.sum() # number of predictions\n\n if n_p == 0 or n_l == 0:\n continue\n else:\n # Accumulate FPs and TPs\n fpc = (1 - tp[i]).cumsum(0)\n tpc = tp[i].cumsum(0)\n\n # Recall\n recall = tpc / (n_l + 1e-16) # recall curve\n r[ci] = np.interp(-px, -conf[i], recall[:, 0], left=0) # negative x, xp because xp decreases\n\n # Precision\n precision = tpc / (tpc + fpc) # precision curve\n p[ci] = np.interp(-px, -conf[i], precision[:, 0], left=1) # p at pr_score\n # AP from recall-precision curve\n for j in range(tp.shape[1]):\n ap[ci, j], mpre, mrec = compute_ap(recall[:, j], precision[:, j])\n if plot and (j == 0):\n py.append(np.interp(px, mrec, mpre)) # precision at [email protected]\n\n # Compute F1 score (harmonic mean of precision and recall)\n f1 = 2 * p * r / (p + r + 1e-16)\n i=r.mean(0).argmax()\n\n if plot:\n plot_pr_curve(px, py, ap, save_dir, names)\n\n return p[:, i], r[:, i], ap, f1[:, i], unique_classes.astype('int32')" }, { "identifier": "output_to_target", "path": "lib/core/general.py", "snippet": "def output_to_target(output):\n # Convert model output to target format [batch_id, class_id, x, y, w, h, conf]\n targets = []\n for i, o in enumerate(output):\n for *box, conf, cls in o.cpu().numpy():\n targets.append([i, cls, *list(*xyxy2xywh(np.array(box)[None])), conf])\n return np.array(targets)" }, { "identifier": "time_synchronized", "path": "lib/utils/utils.py", "snippet": "def time_synchronized():\n torch.cuda.synchronize() if torch.cuda.is_available() else None\n return time.time()" }, { "identifier": "plot_img_and_mask", "path": "lib/utils/plot.py", "snippet": "def plot_img_and_mask(img, mask, index,epoch,save_dir):\r\n classes = mask.shape[2] if len(mask.shape) > 2 else 1\r\n fig, ax = plt.subplots(1, classes + 1)\r\n ax[0].set_title('Input image')\r\n ax[0].imshow(img)\r\n if classes > 1:\r\n for i in range(classes):\r\n ax[i+1].set_title(f'Output mask (class {i+1})')\r\n ax[i+1].imshow(mask[:, :, i])\r\n else:\r\n ax[1].set_title(f'Output mask')\r\n ax[1].imshow(mask)\r\n plt.xticks([]), plt.yticks([])\r\n # plt.show()\r\n plt.savefig(save_dir+\"/batch_{}_{}_seg.png\".format(epoch,index))\r" }, { "identifier": "plot_one_box", "path": "lib/utils/plot.py", "snippet": "def plot_one_box(x, img, color=None, label=None, line_thickness=None):\r\n # Plots one bounding box on image img 在图像上画一个检测框\r\n tl = line_thickness or round(0.0001 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness\r\n color = color or [random.randint(0, 255) for _ in range(3)]\r\n c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))\r\n cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)\r\n if label:\r\n tf = max(tl - 1, 1) # font thickness\r\n t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]\r\n c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3\r\n cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA) # filled\r\n print(label)\r\n cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)\r" }, { "identifier": "show_seg_result", "path": "lib/utils/plot.py", "snippet": "def show_seg_result(img, result, index, epoch, save_dir=None, is_ll=False,palette=None,is_demo=False,is_gt=False):\r\n # img = mmcv.imread(img)\r\n # img = img.copy()\r\n # seg = result[0]\r\n if 
palette is None:\r\n palette = np.random.randint(\r\n 0, 255, size=(3, 3))\r\n palette[0] = [0, 0, 0]\r\n palette[1] = [0, 255, 0]\r\n palette[2] = [255, 0, 0]\r\n palette = np.array(palette)\r\n assert palette.shape[0] == 3 # len(classes)\r\n assert palette.shape[1] == 3\r\n assert len(palette.shape) == 2\r\n \r\n if not is_demo:\r\n color_seg = np.zeros((result.shape[0], result.shape[1], 3), dtype=np.uint8)\r\n for label, color in enumerate(palette):\r\n color_seg[result == label, :] = color\r\n else:\r\n color_area = np.zeros((result[0].shape[0], result[0].shape[1], 3), dtype=np.uint8)\r\n \r\n # for label, color in enumerate(palette):\r\n # color_area[result[0] == label, :] = color\r\n\r\n color_area[result[0] == 1] = [0, 255, 0]\r\n color_area[result[1] ==1] = [255, 0, 0]\r\n color_seg = color_area\r\n\r\n # convert to BGR\r\n color_seg = color_seg[..., ::-1]\r\n # print(color_seg.shape)\r\n color_mask = np.mean(color_seg, 2)\r\n img[color_mask != 0] = img[color_mask != 0] * 0.5 + color_seg[color_mask != 0] * 0.5\r\n # img = img * 0.5 + color_seg * 0.5\r\n img = img.astype(np.uint8)\r\n img = cv2.resize(img, (1280,720), interpolation=cv2.INTER_LINEAR)\r\n\r\n if not is_demo:\r\n if not is_gt:\r\n if not is_ll:\r\n cv2.imwrite(save_dir+\"/batch_{}_{}_da_segresult.png\".format(epoch,index), img)\r\n else:\r\n cv2.imwrite(save_dir+\"/batch_{}_{}_ll_segresult.png\".format(epoch,index), img)\r\n else:\r\n if not is_ll:\r\n cv2.imwrite(save_dir+\"/batch_{}_{}_da_seg_gt.png\".format(epoch,index), img)\r\n else:\r\n cv2.imwrite(save_dir+\"/batch_{}_{}_ll_seg_gt.png\".format(epoch,index), img) \r\n return img\r" } ]
import time import torch import numpy as np import json import random import cv2 import os import math import wandb from lib.core.evaluate import ConfusionMatrix,SegmentationMetric from lib.core.general import non_max_suppression,check_img_size,scale_coords,xyxy2xywh,xywh2xyxy,box_iou,coco80_to_coco91_class,plot_images,ap_per_class,output_to_target from lib.utils.utils import time_synchronized from lib.utils import plot_img_and_mask,plot_one_box,show_seg_result from threading import Thread from PIL import Image from torchvision import transforms from pathlib import Path from torch.cuda import amp from tqdm import tqdm from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval
11793
img_test1 = img_test.copy() _ = show_seg_result(img_test, da_seg_mask, i,epoch,save_dir) _ = show_seg_result(img_test1, da_gt_mask, i, epoch, save_dir, is_gt=True) img_ll = cv2.imread(paths[i]) ll_seg_mask = ll_seg_out[i][:, pad_h:height-pad_h, pad_w:width-pad_w].unsqueeze(0) ll_seg_mask = torch.nn.functional.interpolate(ll_seg_mask, scale_factor=int(1/ratio), mode='bilinear') _, ll_seg_mask = torch.max(ll_seg_mask, 1) ll_gt_mask = target[2][i][:, pad_h:height-pad_h, pad_w:width-pad_w].unsqueeze(0) ll_gt_mask = torch.nn.functional.interpolate(ll_gt_mask, scale_factor=int(1/ratio), mode='bilinear') _, ll_gt_mask = torch.max(ll_gt_mask, 1) ll_seg_mask = ll_seg_mask.int().squeeze().cpu().numpy() ll_gt_mask = ll_gt_mask.int().squeeze().cpu().numpy() # seg_mask = seg_mask > 0.5 # plot_img_and_mask(img_test, seg_mask, i,epoch,save_dir) img_ll1 = img_ll.copy() _ = show_seg_result(img_ll, ll_seg_mask, i,epoch,save_dir, is_ll=True) _ = show_seg_result(img_ll1, ll_gt_mask, i, epoch, save_dir, is_ll=True, is_gt=True) img_det = cv2.imread(paths[i]) img_gt = img_det.copy() det = output[i].clone() if len(det): det[:,:4] = scale_coords(img[i].shape[1:],det[:,:4],img_det.shape).round() for *xyxy,conf,cls in reversed(det): #print(cls) # import pdb;pdb.set_trace() label_det_pred = f'{names[int(cls)]} {conf:.3f}' plot_one_box(xyxy, img_det , label=label_det_pred, color=colors[int(cls)], line_thickness=3) cv2.imwrite(save_dir+"/batch_{}_{}_det_pred.png".format(epoch,i),img_det) labels = target[0][target[0][:, 0] == i, 1:] # print(labels) labels[:,1:5]=xywh2xyxy(labels[:,1:5]) if len(labels): labels[:,1:5]=scale_coords(img[i].shape[1:],labels[:,1:5],img_gt.shape).round() for cls,x1,y1,x2,y2 in labels: # print(names) # print(cls) label_det_gt = f'{names[int(cls)]}' xyxy = (x1,y1,x2,y2) plot_one_box(xyxy, img_gt , label=label_det_gt, color=colors[int(cls)], line_thickness=3) cv2.imwrite(save_dir+"/batch_{}_{}_det_gt.png".format(epoch,i),img_gt) # Statistics per image # output([xyxy,conf,cls]) # target[0] ([img_id,cls,xyxy]) for si, pred in enumerate(output): labels = target[0][target[0][:, 0] == si, 1:] #all object in one image nl = len(labels) # num of object tcls = labels[:, 0].tolist() if nl else [] # target class path = Path(paths[si]) seen += 1 if len(pred) == 0: if nl: stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls)) continue # Predictions predn = pred.clone() scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0], shapes[si][1]) # native-space pred # Append to text file if config.TEST.SAVE_TXT: gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0]] # normalization gain whwh for *xyxy, conf, cls in predn.tolist(): xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') # W&B logging if config.TEST.PLOTS and len(wandb_images) < log_imgs: box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, "class_id": int(cls), "box_caption": "%s %.3f" % (names[cls], conf), "scores": {"class_score": conf}, "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()] boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space wandb_images.append(wandb.Image(img[si], boxes=boxes, caption=path.name)) # Append to pycocotools JSON dictionary if config.TEST.SAVE_JSON: # [{"image_id": 42, "category_id": 
18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ... image_id = int(path.stem) if path.stem.isnumeric() else path.stem box = xyxy2xywh(predn[:, :4]) # xywh box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner for p, b in zip(pred.tolist(), box.tolist()): jdict.append({'image_id': image_id, 'category_id': coco91class[int(p[5])] if is_coco else int(p[5]), 'bbox': [round(x, 3) for x in b], 'score': round(p[4], 5)}) # Assign all predictions as incorrect correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device) if nl: detected = [] # target indices tcls_tensor = labels[:, 0] # target boxes tbox = xywh2xyxy(labels[:, 1:5]) scale_coords(img[si].shape[1:], tbox, shapes[si][0], shapes[si][1]) # native-space labels if config.TEST.PLOTS: confusion_matrix.process_batch(pred, torch.cat((labels[:, 0:1], tbox), 1)) # Per target class for cls in torch.unique(tcls_tensor): ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1) # prediction indices pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1) # target indices # Search for detections if pi.shape[0]: # Prediction to target ious # n*m n:pred m:label
id_dict_SDExpressway = { 0:'Car', 1:'Truck', 2:'Guidance Sign', 3:'Warning Sign', 4:'Pending Sign', 5:'Speed Limit Sign', 6:'Emergency Telephone Sign', 7:'Directional Sign', 8:'Straight Ahead Arrow', 9:'Straight or Right Turn Arrow'} def train(cfg, train_loader, model, criterion, optimizer, scaler, epoch, num_batch, num_warmup, writer_dict, logger, device, rank=-1): """ train for one epoch Inputs: - config: configurations - train_loader: loder for data - model: - criterion: (function) calculate all the loss, return total_loss, head_losses - writer_dict: outputs(2,) output[0] len:3, [1,3,32,32,85], [1,3,16,16,85], [1,3,8,8,85] output[1] len:1, [2,256,256] output[2] len:1, [2,256,256] target(2,) target[0] [1,n,5] target[1] [2,256,256] target[2] [2,256,256] Returns: None """ batch_time = AverageMeter() # batch_time = <lib.core.function.AverageMeter object at 0x7f0255618970> data_time = AverageMeter() # data_time = <lib.core.function.AverageMeter object at 0x7f025561a4f0> losses = AverageMeter() # losses = <lib.core.function.AverageMeter object at 0x7f02402e7cd0> # switch to train mode model.train() start = time.time() # start = 1688805138.6791408 for i, (input, target, paths, shapes) in enumerate(train_loader): # i=0 # target = [tensor([[0.0000e+00,...335e-01]]), tensor([[[[1., 1., 1...., 0.]]]]), tensor([[[[1., 1., 1...., 0.]]]])] # paths = ('/home/xingchen/Study...3225df.jpg', '/home/xingchen/Study...49926c.jpg', ...) # shapes = (((720, 1280), ((0.5, 0.5), (0.0, 12.0))), ((...), (...)), ...) intermediate = time.time() # intermediate = 1688805496.5324085 #print('tims:{}'.format(intermediate-start)) num_iter = i + num_batch * (epoch - 1) # num_iter = 0 # num_batch = 4375 if num_iter < num_warmup: # warm up lf = lambda x: ((1 + math.cos(x * math.pi / cfg.TRAIN.END_EPOCH)) / 2) * \ (1 - cfg.TRAIN.LRF) + cfg.TRAIN.LRF # cosine xi = [0, num_warmup] # model.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou) for j, x in enumerate(optimizer.param_groups): # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0 # 偏置lr从0.1下降到lr0,所有其他lr从0.0上升到lr0 x['lr'] = np.interp(num_iter, xi, [cfg.TRAIN.WARMUP_BIASE_LR if j == 2 else 0.0, x['initial_lr'] * lf(epoch)]) if 'momentum' in x: x['momentum'] = np.interp(num_iter, xi, [cfg.TRAIN.WARMUP_MOMENTUM, cfg.TRAIN.MOMENTUM]) data_time.update(time.time() - start) if not cfg.DEBUG: input = input.to(device, non_blocking=True) assign_target = [] for tgt in target: assign_target.append(tgt.to(device)) target = assign_target with amp.autocast(enabled=device.type != 'cpu'): outputs = model(input) # outputs = [[tensor([[[[[ 8.8806e...ackward0>), tensor([[[[[ 4.6631e...ackward0>), tensor([[[[[ 1.4758e...ackward0>)], tensor([[[[0.5151, 0...ackward0>), tensor([[[[0.4868, 0...ackward0>)] total_loss, head_losses = criterion(outputs, target, shapes,model) # print(head_losses) # compute gradient and do update step optimizer.zero_grad() scaler.scale(total_loss).backward() scaler.step(optimizer) scaler.update() if rank in [-1, 0]: # measure accuracy and record loss losses.update(total_loss.item(), input.size(0)) # _, avg_acc, cnt, pred = accuracy(output.detach().cpu().numpy(), # target.detach().cpu().numpy()) # acc.update(avg_acc, cnt) # measure elapsed time batch_time.update(time.time() - start) end = time.time() if i % cfg.PRINT_FREQ == 0: msg = 'Epoch: [{0}][{1}/{2}]\t' \ 'Time {batch_time.val:.3f}s ({batch_time.avg:.3f}s)\t' \ 'Speed {speed:.1f} samples/s\t' \ 'Data {data_time.val:.3f}s ({data_time.avg:.3f}s)\t' \ 'Loss {loss.val:.5f} 
({loss.avg:.5f})'.format( epoch, i, len(train_loader), batch_time=batch_time, speed=input.size(0)/batch_time.val, data_time=data_time, loss=losses) logger.info(msg) writer = writer_dict['writer'] global_steps = writer_dict['train_global_steps'] writer.add_scalar('train_loss', losses.val, global_steps) # writer.add_scalar('train_acc', acc.val, global_steps) writer_dict['train_global_steps'] = global_steps + 1 def validate(epoch,config, val_loader, val_dataset, model, criterion, output_dir, tb_log_dir, writer_dict=None, logger=None, device='cpu', rank=-1,nc = 1): """ validata Inputs: - config: configurations - train_loader: loder for data - model: - criterion: (function) calculate all the loss, return - writer_dict: Return: None """ # setting max_stride = 32 weights = None save_dir = output_dir + os.path.sep + 'visualization' # save_dir = 'runs/BddDataset/_2023-07-09-09-50/visualization' if not os.path.exists(save_dir): os.mkdir(save_dir) # print(save_dir) _, imgsz = [check_img_size(x, s=max_stride) for x in config.MODEL.IMAGE_SIZE] #imgsz is multiple of max_stride batch_size = config.TRAIN.BATCH_SIZE_PER_GPU * len(config.GPUS) # batch_size = 16 test_batch_size = config.TEST.BATCH_SIZE_PER_GPU * len(config.GPUS) # test_batch_size = 16 training = False is_coco = False #is coco dataset save_conf=False # save auto-label confidences verbose=False save_hybrid=False log_imgs,wandb = min(16,100), None nc = 10 #20230904 iouv = torch.linspace(0.5,0.95,10).to(device) #iou vector for [email protected]:0.95 niou = iouv.numel() # niou = 10 try: except ImportError: wandb = None log_imgs = 0 seen = 0 # import pdb;pdb.set_trace() confusion_matrix = ConfusionMatrix(nc=model.nc) #detector confusion matrix # confusion matrix 混合矩阵 da_metric = SegmentationMetric(config.num_seg_class) #segment confusion matrix ll_metric = SegmentationMetric(2) #segment confusion matrix # names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)} # names = {'0':0} names = id_dict_SDExpressway #20230904 colors = [[random.randint(0, 255) for _ in range(3)] for _ in names] # colors = [[191, 83, 111]] coco91class = coco80_to_coco91_class() s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', '[email protected]', '[email protected]:.95') # s = ' Class Images Targets P R [email protected] [email protected]:.95' p, r, f1, mp, mr, map50, map, t_inf, t_nms = 0., 0., 0., 0., 0., 0., 0., 0., 0. losses = AverageMeter() da_acc_seg = AverageMeter() da_IoU_seg = AverageMeter() da_mIoU_seg = AverageMeter() ll_acc_seg = AverageMeter() ll_IoU_seg = AverageMeter() ll_mIoU_seg = AverageMeter() T_inf = AverageMeter() T_nms = AverageMeter() # switch to train mode model.eval() jdict, stats, ap, ap_class, wandb_images = [], [], [], [], [] for batch_i, (img, target, paths, shapes) in tqdm(enumerate(val_loader), total=len(val_loader)): if not config.DEBUG: img = img.to(device, non_blocking=True) assign_target = [] for tgt in target: assign_target.append(tgt.to(device)) target = assign_target nb, _, height, width = img.shape #batch size, channel, height, width with torch.no_grad(): pad_w, pad_h = shapes[0][1][1] pad_w = int(pad_w) pad_h = int(pad_h) ratio = shapes[0][1][0][0] t = time_synchronized() det_out, da_seg_out, ll_seg_out= model(img) # 检测图片? 
t_inf = time_synchronized() - t if batch_i > 0: T_inf.update(t_inf/img.size(0),img.size(0)) inf_out,train_out = det_out #driving area segment evaluation # 可驾驶区域分割评估 _,da_predict=torch.max(da_seg_out, 1) _,da_gt=torch.max(target[1], 1) da_predict = da_predict[:, pad_h:height-pad_h, pad_w:width-pad_w] da_gt = da_gt[:, pad_h:height-pad_h, pad_w:width-pad_w] da_metric.reset() da_metric.addBatch(da_predict.cpu(), da_gt.cpu()) da_acc = da_metric.pixelAccuracy() da_IoU = da_metric.IntersectionOverUnion() da_mIoU = da_metric.meanIntersectionOverUnion() da_acc_seg.update(da_acc,img.size(0)) da_IoU_seg.update(da_IoU,img.size(0)) da_mIoU_seg.update(da_mIoU,img.size(0)) #lane line segment evaluation # 车道线分割评估 _,ll_predict=torch.max(ll_seg_out, 1) _,ll_gt=torch.max(target[2], 1) ll_predict = ll_predict[:, pad_h:height-pad_h, pad_w:width-pad_w] ll_gt = ll_gt[:, pad_h:height-pad_h, pad_w:width-pad_w] ll_metric.reset() ll_metric.addBatch(ll_predict.cpu(), ll_gt.cpu()) ll_acc = ll_metric.lineAccuracy() ll_IoU = ll_metric.IntersectionOverUnion() ll_mIoU = ll_metric.meanIntersectionOverUnion() ll_acc_seg.update(ll_acc,img.size(0)) ll_IoU_seg.update(ll_IoU,img.size(0)) ll_mIoU_seg.update(ll_mIoU,img.size(0)) total_loss, head_losses = criterion((train_out,da_seg_out, ll_seg_out), target, shapes,model) #Compute loss losses.update(total_loss.item(), img.size(0)) #NMS # 非极大值抑制 t = time_synchronized() target[0][:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels lb = [target[0][target[0][:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling output = non_max_suppression(inf_out, conf_thres= config.TEST.NMS_CONF_THRESHOLD, iou_thres=config.TEST.NMS_IOU_THRESHOLD, labels=lb) #output = non_max_suppression(inf_out, conf_thres=0.001, iou_thres=0.6) #output = non_max_suppression(inf_out, conf_thres=config.TEST.NMS_CONF_THRES, iou_thres=config.TEST.NMS_IOU_THRES) t_nms = time_synchronized() - t if batch_i > 0: T_nms.update(t_nms/img.size(0),img.size(0)) if config.TEST.PLOTS: if batch_i == 0: for i in range(test_batch_size): img_test = cv2.imread(paths[i]) da_seg_mask = da_seg_out[i][:, pad_h:height-pad_h, pad_w:width-pad_w].unsqueeze(0) da_seg_mask = torch.nn.functional.interpolate(da_seg_mask, scale_factor=int(1/ratio), mode='bilinear') _, da_seg_mask = torch.max(da_seg_mask, 1) da_gt_mask = target[1][i][:, pad_h:height-pad_h, pad_w:width-pad_w].unsqueeze(0) da_gt_mask = torch.nn.functional.interpolate(da_gt_mask, scale_factor=int(1/ratio), mode='bilinear') _, da_gt_mask = torch.max(da_gt_mask, 1) da_seg_mask = da_seg_mask.int().squeeze().cpu().numpy() da_gt_mask = da_gt_mask.int().squeeze().cpu().numpy() # seg_mask = seg_mask > 0.5 # plot_img_and_mask(img_test, seg_mask, i,epoch,save_dir) img_test1 = img_test.copy() _ = show_seg_result(img_test, da_seg_mask, i,epoch,save_dir) _ = show_seg_result(img_test1, da_gt_mask, i, epoch, save_dir, is_gt=True) img_ll = cv2.imread(paths[i]) ll_seg_mask = ll_seg_out[i][:, pad_h:height-pad_h, pad_w:width-pad_w].unsqueeze(0) ll_seg_mask = torch.nn.functional.interpolate(ll_seg_mask, scale_factor=int(1/ratio), mode='bilinear') _, ll_seg_mask = torch.max(ll_seg_mask, 1) ll_gt_mask = target[2][i][:, pad_h:height-pad_h, pad_w:width-pad_w].unsqueeze(0) ll_gt_mask = torch.nn.functional.interpolate(ll_gt_mask, scale_factor=int(1/ratio), mode='bilinear') _, ll_gt_mask = torch.max(ll_gt_mask, 1) ll_seg_mask = ll_seg_mask.int().squeeze().cpu().numpy() ll_gt_mask = ll_gt_mask.int().squeeze().cpu().numpy() # seg_mask = seg_mask > 0.5 # 
plot_img_and_mask(img_test, seg_mask, i,epoch,save_dir) img_ll1 = img_ll.copy() _ = show_seg_result(img_ll, ll_seg_mask, i,epoch,save_dir, is_ll=True) _ = show_seg_result(img_ll1, ll_gt_mask, i, epoch, save_dir, is_ll=True, is_gt=True) img_det = cv2.imread(paths[i]) img_gt = img_det.copy() det = output[i].clone() if len(det): det[:,:4] = scale_coords(img[i].shape[1:],det[:,:4],img_det.shape).round() for *xyxy,conf,cls in reversed(det): #print(cls) # import pdb;pdb.set_trace() label_det_pred = f'{names[int(cls)]} {conf:.3f}' plot_one_box(xyxy, img_det , label=label_det_pred, color=colors[int(cls)], line_thickness=3) cv2.imwrite(save_dir+"/batch_{}_{}_det_pred.png".format(epoch,i),img_det) labels = target[0][target[0][:, 0] == i, 1:] # print(labels) labels[:,1:5]=xywh2xyxy(labels[:,1:5]) if len(labels): labels[:,1:5]=scale_coords(img[i].shape[1:],labels[:,1:5],img_gt.shape).round() for cls,x1,y1,x2,y2 in labels: # print(names) # print(cls) label_det_gt = f'{names[int(cls)]}' xyxy = (x1,y1,x2,y2) plot_one_box(xyxy, img_gt , label=label_det_gt, color=colors[int(cls)], line_thickness=3) cv2.imwrite(save_dir+"/batch_{}_{}_det_gt.png".format(epoch,i),img_gt) # Statistics per image # output([xyxy,conf,cls]) # target[0] ([img_id,cls,xyxy]) for si, pred in enumerate(output): labels = target[0][target[0][:, 0] == si, 1:] #all object in one image nl = len(labels) # num of object tcls = labels[:, 0].tolist() if nl else [] # target class path = Path(paths[si]) seen += 1 if len(pred) == 0: if nl: stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls)) continue # Predictions predn = pred.clone() scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0], shapes[si][1]) # native-space pred # Append to text file if config.TEST.SAVE_TXT: gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0]] # normalization gain whwh for *xyxy, conf, cls in predn.tolist(): xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') # W&B logging if config.TEST.PLOTS and len(wandb_images) < log_imgs: box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, "class_id": int(cls), "box_caption": "%s %.3f" % (names[cls], conf), "scores": {"class_score": conf}, "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()] boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space wandb_images.append(wandb.Image(img[si], boxes=boxes, caption=path.name)) # Append to pycocotools JSON dictionary if config.TEST.SAVE_JSON: # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ... 
image_id = int(path.stem) if path.stem.isnumeric() else path.stem box = xyxy2xywh(predn[:, :4]) # xywh box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner for p, b in zip(pred.tolist(), box.tolist()): jdict.append({'image_id': image_id, 'category_id': coco91class[int(p[5])] if is_coco else int(p[5]), 'bbox': [round(x, 3) for x in b], 'score': round(p[4], 5)}) # Assign all predictions as incorrect correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device) if nl: detected = [] # target indices tcls_tensor = labels[:, 0] # target boxes tbox = xywh2xyxy(labels[:, 1:5]) scale_coords(img[si].shape[1:], tbox, shapes[si][0], shapes[si][1]) # native-space labels if config.TEST.PLOTS: confusion_matrix.process_batch(pred, torch.cat((labels[:, 0:1], tbox), 1)) # Per target class for cls in torch.unique(tcls_tensor): ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1) # prediction indices pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1) # target indices # Search for detections if pi.shape[0]: # Prediction to target ious # n*m n:pred m:label
ious, i = box_iou(predn[pi, :4], tbox[ti]).max(1) # best ious, indices
7
2023-10-24 02:08:25+00:00
16k
giulio98/functional-diffusion-processes
src/functional_diffusion_processes/trainers/trainer.py
[ { "identifier": "AudioDataset", "path": "src/functional_diffusion_processes/datasets/audio_dataset.py", "snippet": "class AudioDataset(BaseDataset, abc.ABC):\n \"\"\"Base class for defining audio datasets.\n\n This class serves as the foundation for defining datasets containing audio data.\n It includes methods for preprocessing, resizing, and normalizing audio data.\n Subclasses may override these methods to implement dataset-specific processing and resizing logic.\n \"\"\"\n\n def __init__(self, data_config: DictConfig, split: str, evaluation: bool = False) -> None:\n \"\"\"Initialize an AudioDataset instance.\n\n Args:\n data_config (DictConfig): Configuration for loading the dataset, including paths, audio properties, etc.\n split (str): Specifies which split of the dataset to load (e.g., 'train', 'validation', 'test').\n evaluation (bool, optional): Indicates whether the dataset is for evaluation purposes. Defaults to False.\n \"\"\"\n super().__init__(data_config, split, evaluation)\n\n @staticmethod\n def normalize_audio(audio_np: np.ndarray, sample_rate: int) -> np.ndarray:\n \"\"\"Normalize the amplitude of the audio data to a standard range.\n\n This method utilizes PyDub's effects module to perform audio normalization.\n\n Args:\n audio_np (np.ndarray): Audio data represented as a NumPy array.\n sample_rate (int): The sample rate of the audio data.\n\n Returns:\n np.ndarray: The normalized audio data as a NumPy array.\n \"\"\"\n # Convert numpy array to AudioSegment\n audio_segment = AudioSegment(audio_np.tobytes(), frame_rate=int(sample_rate), sample_width=2, channels=1)\n\n # Normalize with PyDub\n normalized_audio_segment = effects.normalize(audio_segment)\n\n # Convert back to numpy\n normalized_audio_np = np.array(normalized_audio_segment.get_array_of_samples())\n\n return normalized_audio_np\n\n def _resize_op(self, audio: tf.Tensor, size: int) -> tf.Tensor:\n \"\"\"Resize the input audio to a specified size and normalize its amplitude to the range [0, 1].\n\n If the audio length is less than the specified size, zero padding is applied to reach the desired size.\n If the audio length is greater, it is truncated to the specified size.\n\n Args:\n audio (tf.Tensor): Input audio data as a TensorFlow tensor.\n size (int): The target size for the audio data.\n\n Returns:\n tf.Tensor: The resized and normalized audio data as a TensorFlow tensor.\n \"\"\"\n # Normalize dataset\n pylogger.info(\"Normalizing audio...\")\n audio = tf.cast(audio, dtype=tf.int16)\n # Calculate current length of the audio\n pylogger.info(\"Resizing audio to size {}...\".format(size))\n audio_length = tf.shape(audio)[0]\n audio = tf.cond(\n audio_length < size,\n lambda: tf.concat([audio, tf.zeros(size - audio_length, dtype=audio.dtype)], axis=0),\n lambda: audio[:size],\n )\n audio_np = tf.numpy_function(self.normalize_audio, [audio, self.data_config.audio_sample_rate], tf.int16)\n audio = tf.convert_to_tensor(audio_np, dtype=tf.int16)\n audio = tf.cast(audio, dtype=tf.float32)\n pylogger.info(\"Converting audio to range [-1, 1]...\")\n max_intensity = self.data_config.audio_max_intensity\n audio = audio / max_intensity\n return audio\n\n def preprocess_fn(self, d: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"Preprocess the input audio data.\n\n This method resizes the audio data to a specified size based on the dataset configuration and normalizes the amplitude to the range [-1, +1].\n\n Args:\n d (Dict[str, Any]): A dictionary containing the input audio data and any associated metadata.\n\n 
Returns:\n Dict[str, Any]: A dictionary containing the preprocessed audio data and any associated metadata.\n \"\"\"\n pylogger.info(\"Preprocessing audios for split {}...\".format(self.split))\n audio = self._resize_op(\n audio=d[\"audio\"], size=int(self.data_config.audio_sample_rate * self.data_config.audio_max_duration)\n )\n audio = tf.reshape(\n tensor=audio,\n shape=(-1, self.data_config.output_size),\n )\n pylogger.info(\"Audio reshaped to shape {}...\".format(audio.shape))\n return dict(data=audio, label=d.get(\"label\", None))\n\n def postprocess_fn(self, batch_data: Any, inverse_scaler: Callable) -> Any:\n \"\"\"Postprocess the output audio data.\n\n This method applies the inverse of the preprocessing steps to revert the audio data to its original form.\n\n Args:\n batch_data (Any): A batch of audio data to postprocess.\n inverse_scaler (Callable): A function that applies the inverse of the preprocessing steps.\n\n Returns:\n Any: A batch of postprocessed audio data.\n \"\"\"\n max_intensity = self.data_config.audio_max_intensity\n batch_audio = inverse_scaler(batch_data)\n batch_audio = batch_audio * max_intensity\n batch_post_processed = tf.cast(batch_audio, tf.int16)\n audio_np = tf.numpy_function(\n self.normalize_audio, [batch_post_processed, self.data_config.audio_sample_rate], tf.int16\n )\n batch_post_processed = tf.convert_to_tensor(audio_np, dtype=tf.int16)\n return batch_post_processed" }, { "identifier": "ImageDataset", "path": "src/functional_diffusion_processes/datasets/image_dataset.py", "snippet": "class ImageDataset(BaseDataset, abc.ABC):\n \"\"\"Base class for handling image datasets.\n\n Provides a structured way to load, preprocess, and post-process image data.\n This class can be extended to handle specific image datasets as required.\n\n Attributes:\n data_config (DictConfig): Configuration settings for loading the dataset.\n split (str): Specifies the dataset split to load ('train', 'val', 'test', etc.).\n evaluation (bool): Indicates if the dataset is used for evaluation.\n \"\"\"\n\n def __init__(self, data_config: DictConfig, split: str, evaluation: bool = False) -> None:\n \"\"\"Initializes the ImageDataset object with dataset configurations.\n\n Args:\n data_config (DictConfig): Configuration settings for loading the dataset.\n split (str): Specifies the dataset split to load ('train', 'val', 'test', etc.).\n evaluation (bool): Indicates if the dataset is used for evaluation.\n \"\"\"\n super().__init__(data_config, split, evaluation)\n\n @staticmethod\n def _resize_op(image: Any, size: int) -> Any:\n \"\"\"Resizes the input image to the specified size and normalizes its values to the range [0,1].\n\n Args:\n image (Any): A tensor representing the input image.\n size (int): The target size for each dimension of the output image.\n\n Returns:\n Any: A tensor representing the resized and normalized image.\n \"\"\"\n # convert to range [0,1]\n pylogger.info(\"Converting image to range [0,1]...\")\n image = tf.image.convert_image_dtype(image=image, dtype=tf.float32)\n\n # resize to size\n pylogger.info(\"Resizing image to size {}...\".format(size))\n\n image = tf.image.resize(images=image, size=[size, size])\n\n return image\n\n def preprocess_fn(self, d: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"Preprocesses the input data by resizing, possibly flipping, and applying uniform dequantization.\n\n Args:\n d (Dict[str, Any]): A dictionary containing the input data with keys 'image' and optionally 'label'.\n\n Returns:\n Dict[str, Any]: A dictionary 
containing the preprocessed data, with keys 'data' and optionally 'label'.\n \"\"\"\n image = self._resize_op(image=d[\"image\"], size=self.data_config.image_width_size)\n\n pylogger.info(\"Preprocessing images for split {}...\".format(self.split))\n\n if self.data_config.random_flip and not self.evaluation:\n pylogger.info(\"Applying random flips...\")\n image = tf.image.random_flip_left_right(image=image, seed=self.data_config.seed)\n\n if self.data_config.uniform_dequantization:\n pylogger.info(\"Applying uniform dequantization...\")\n image = (\n tf.random.uniform(shape=image.shape, dtype=tf.float32, seed=self.data_config.seed) + image * 255.0\n ) / 256.0\n\n image = tf.reshape(\n tensor=image,\n shape=(-1, self.data_config.output_size),\n )\n pylogger.info(\"Image reshaped to shape {}...\".format(image.shape))\n\n return dict(data=image, label=d.get(\"label\", None))\n\n def postprocess_fn(self, batch_data: Any, inverse_scaler: Callable) -> Any:\n \"\"\"Post-processes the output data by reverting the preprocessing steps.\n\n Args:\n batch_data (Any): A batch of data to postprocess.\n inverse_scaler (Callable): A function to invert the scaling applied to the data.\n\n Returns:\n Any: A batch of postprocessed data, arranged in a grid for visualization.\n \"\"\"\n batch_post_processed = make_grid_image(\n ndarray=process_images(images=batch_data),\n inverse_scaler=inverse_scaler,\n )\n return batch_post_processed" }, { "identifier": "BaseDataset", "path": "src/functional_diffusion_processes/datasets/base_dataset.py", "snippet": "class BaseDataset(abc.ABC):\n \"\"\"Abstract base class for defining datasets.\n\n Provides a template for loading, preprocessing, and iterating over datasets.\n It encapsulates common dataset configurations and operations while allowing for dataset-specific\n preprocessing and post-processing through abstract methods.\n\n Attributes:\n dataset_builder: A builder object for loading the dataset.\n data_config (DictConfig): Configuration parameters for the dataset.\n split (str): Specifies which split of the dataset to load, e.g., 'train', 'validation', or 'test'.\n evaluation (bool): Indicates whether the dataset is for evaluation purposes.\n dataset_options: Options for configuring the dataset pipeline.\n \"\"\"\n\n def __init__(self, data_config: DictConfig, split: str, evaluation: bool = False) -> None:\n \"\"\"Abstract base class for defining datasets.\n\n This class provides a skeleton for defining datasets, with abstract methods for\n preprocessing data, generating batches of data, and resizing images. 
Subclasses\n must implement these methods to define their specific datasets.\n\n Args:\n data_config (DictConfig): A dictionary-like object containing the configuration for\n loading the dataset.\n\n split (str): A string specifying which split of the dataset to load.\n\n evaluation (bool): A boolean specifying whether the dataset is for evaluation purposes.\n \"\"\"\n self.dataset_builder = None\n self.data_config = data_config\n self.split = split\n self.evaluation = evaluation\n self.dataset_options = tf.data.Options()\n self.dataset_options.experimental_optimization.map_parallelization = True\n self.dataset_options.experimental_threading.private_threadpool_size = 48\n self.dataset_options.experimental_threading.max_intra_op_parallelism = 1\n\n @abc.abstractmethod\n def preprocess_fn(self, d: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"Abstract method for preprocessing input data.\n\n Subclasses should override this method to implement dataset-specific preprocessing.\n\n Args:\n d (Dict[str, Any]): A dictionary containing the input data.\n\n Returns:\n Dict[str, Any]: A dictionary containing the preprocessed data.\n \"\"\"\n raise NotImplementedError(\"Subclasses must implement preprocess_fn method.\")\n\n @abc.abstractmethod\n def postprocess_fn(self, batch_data: Any, inverse_scaler: Callable) -> Any:\n \"\"\"Abstract method for postprocessing output data.\n\n Subclasses should override this method to implement dataset-specific post-processing.\n\n Args:\n batch_data (Any): A batch of data to postprocess.\n inverse_scaler (Callable): A function to inverse the scaling of the data.\n\n Returns:\n Any: A dictionary containing the postprocessed data.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the postprocess_fn method.\")\n\n def _generator(self) -> Iterator[Any]:\n \"\"\"Generate batches of preprocessed data.\n\n Loads the dataset, shuffles the data, applies preprocessing, and batches the data.\n Subclasses might override this method to implement dataset-specific batching logic.\n\n Returns:\n Iterator[Any]: An iterator that generates batches of preprocessed data.\n \"\"\"\n # load the dataset\n if isinstance(self.dataset_builder, tfds.core.DatasetBuilder):\n read_config = tfds.ReadConfig(options=self.dataset_options)\n if self.data_config.download:\n self.dataset_builder.download_and_prepare()\n ds = self.dataset_builder.as_dataset(\n split=self.split,\n shuffle_files=False,\n read_config=read_config,\n as_supervised=False,\n )\n else:\n ds = self.dataset_builder.with_options(options=self.dataset_options)\n\n ds = ds.shuffle(buffer_size=10000, seed=self.data_config.seed)\n\n # apply the preprocessing function to each element in the dataset\n ds = ds.map(map_func=self.preprocess_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n # determine the batch size per device\n ds = ds.batch(batch_size=self.data_config.batch_size, drop_remainder=True)\n ds = ds.batch(batch_size=jax.device_count(), drop_remainder=True)\n\n ds = ds.repeat(count=100000 if not self.evaluation else 1)\n\n return iter(ds.prefetch(buffer_size=tf.data.experimental.AUTOTUNE))\n\n def __iter__(self) -> Iterator[Any]:\n \"\"\"Return an iterator that generates batches of preprocessed data.\n\n Calls the `_generator` method to obtain an iterator for generating preprocessed data batches.\n\n Returns:\n Iterator[Any]: An iterator that generates batches of preprocessed data.\n \"\"\"\n return self._generator()\n\n def __len__(self) -> int:\n \"\"\"Return the number of examples in the 
dataset.\n\n Obtains the total number of examples in the specified dataset split from the dataset builder's info attribute.\n\n Returns:\n int: The number of examples in the dataset.\n \"\"\"\n return self.dataset_builder.info.splits[self.split].num_examples" }, { "identifier": "Loss", "path": "src/functional_diffusion_processes/losses/base_loss.py", "snippet": "class Loss(abc.ABC):\n \"\"\"Abstract class representing a loss function.\n\n Provides a framework for defining custom loss functions by enforcing the implementation\n of `construct_loss_fn` method in any derived classes. This class holds a reference to\n a stochastic differential equation (SDE) object which is used to calculate the weight factor for the loss.\n\n Attributes:\n sde (SDE): The stochastic differential equation instance associated with this loss.\n \"\"\"\n\n def __init__(self, sde: SDE) -> None:\n \"\"\"Initializes the Loss instance with a given SDE.\n\n Args:\n sde (SDE): An SDE instance which might be used in the loss computation.\n \"\"\"\n self.sde = sde\n\n def construct_loss_fn(self, model: Any) -> Callable:\n \"\"\"Abstract method to construct a loss function for a given model.\n\n This method should be implemented by any derived class to define the loss\n computation specific to the type of loss being implemented.\n\n Args:\n model (Any): The model for which to construct the loss function.\n\n Returns:\n Callable: A callable representing the constructed loss function.\n\n Raises:\n NotImplementedError: If the method is not implemented by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the construct_loss_fn method.\")" }, { "identifier": "FIDMetric", "path": "src/functional_diffusion_processes/metrics/fid_metric.py", "snippet": "class FIDMetric:\n \"\"\"Class for computing the Frechet Inception Distance (FID) metric.\n\n This class facilitates the computation of the FID metric, which measures the similarity between two distributions of images.\n It precomputes features for the real dataset using a specified Inception feature extractor and provides methods to compute\n and store features for generated images, and to compute the FID and Inception Score (IS).\n\n Attributes:\n metric_config (DictConfig): Configuration parameters for the FID metric.\n feature_extractor (InceptionFeatureExtractor): Inception feature extractor for computing the FID metric.\n dataset (BaseDataset): Dataset object providing real samples for FID computation.\n generated_pools (list): List to store features of generated images.\n generated_logits (list): List to store logits of generated images.\n real_features (dict): Dictionary to store precomputed features of real dataset.\n \"\"\"\n\n def __init__(\n self,\n metric_config: DictConfig,\n feature_extractor: InceptionFeatureExtractor,\n dataset: BaseDataset,\n ) -> None:\n \"\"\"Initializes the FIDMetric class with specified configurations, feature extractor, and dataset.\n\n Args:\n metric_config (DictConfig): Configuration parameters for the FID metric.\n feature_extractor (InceptionFeatureExtractor): Inception feature extractor for computing the FID metric.\n dataset (BaseDataset): Dataset object providing real samples for FID computation.\n \"\"\"\n self.metric_config = metric_config\n self.feature_extractor = feature_extractor\n self.dataset = dataset\n self.generated_pools = []\n self.generated_logits = []\n try:\n self.real_features = load_dataset_stats(\n save_path=metric_config.real_features_path,\n 
dataset_name=metric_config.dataset_name,\n )\n except FileNotFoundError:\n self._precompute_features(\n dataset_name=metric_config.dataset_name,\n save_path=metric_config.real_features_path,\n )\n self.real_features = load_dataset_stats(\n save_path=metric_config.real_features_path,\n dataset_name=metric_config.dataset_name,\n )\n\n def _precompute_features(self, dataset_name: str, save_path: str) -> None:\n \"\"\"Precomputes and saves features for the real dataset.\n\n Args:\n dataset_name (str): Name of the dataset.\n save_path (str): Path where the computed features will be saved.\n \"\"\"\n tf.io.gfile.makedirs(path=save_path)\n\n tf.io.gfile.makedirs(os.path.join(save_path, f\"{dataset_name.lower()}_clean\"))\n\n # Use the feature extractor to compute features for the real dataset\n all_pools = self.feature_extractor.extract_features(\n dataset=self.dataset, save_path=save_path, dataset_name=dataset_name\n )\n\n # Save latent represents of the Inception network to disk or Google Cloud Storage\n filename = f\"{dataset_name.lower()}_stats.npz\"\n\n if jax.host_id() == 0:\n pylogger.info(\"Saving real dataset stats to: %s\" % os.path.join(save_path, filename))\n\n with tf.io.gfile.GFile(os.path.join(save_path, filename), \"wb\") as f_out:\n io_buffer = io.BytesIO()\n np.savez_compressed(io_buffer, pool_3=all_pools)\n f_out.write(io_buffer.getvalue())\n\n def compute_fid(self, eval_dir, num_sampling_round) -> Tuple[float, float]:\n \"\"\"Computes the FID and Inception Score (IS) for the generated and real images.\n\n Args:\n eval_dir (str): Directory path for evaluation.\n num_sampling_round (int): Number of sampling rounds.\n\n Returns:\n Tuple[float, float]: A tuple containing the FID and Inception Score.\n \"\"\"\n real_pools = self.real_features[\"pool_3\"]\n if not self.feature_extractor.inception_v3 and not self.feature_extractor.inception_v3 == \"lenet\":\n if len(self.generated_logits) == 0 or len(self.generated_pools) == 0:\n if jax.host_id() == 0:\n # Load all statistics that have been previously computed and saved for each host\n for host in range(jax.host_count()):\n stats = tf.io.gfile.glob(os.path.join(eval_dir, \"statistics_*.npz\"))\n wait_message = False\n while len(stats) < num_sampling_round:\n if not wait_message:\n print(\"Waiting for statistics on host %d\" % (host,))\n wait_message = True\n stats = tf.io.gfile.glob(os.path.join(eval_dir, \"statistics_*.npz\"))\n time.sleep(10)\n\n for stat_file in stats:\n with tf.io.gfile.GFile(stat_file, \"rb\") as fin:\n stat = np.load(fin)\n\n self.generated_pools.append(stat[\"pool_3\"])\n self.generated_logits.append(stat[\"logits\"])\n\n all_logits = np.concatenate(self.generated_logits, axis=0)[: self.metric_config.num_samples]\n inception_score = tfgan.eval.classifier_score_from_logits(logits=all_logits)\n else:\n inception_score = -1\n\n all_pools = np.concatenate(self.generated_pools, axis=0)[: self.metric_config.num_samples]\n\n fid = tfgan.eval.frechet_classifier_distance_from_activations(activations1=real_pools, activations2=all_pools)\n\n return fid, inception_score\n\n def compute_and_store_generated_features(self, images: Any, sample_dir: str, round_num: int) -> None:\n \"\"\"Computes features for the generated images and stores them in a specified directory.\n\n Args:\n images (Any): Tensor representing the generated images.\n sample_dir (str): Directory where the features will be stored.\n round_num (int): Round number in the training process.\n \"\"\"\n latents = 
self.feature_extractor.extract_features(images)\n\n self.generated_pools.append(latents[\"pool_3\"])\n\n gc.collect()\n\n if self.feature_extractor.model_name == \"inception\" or self.feature_extractor.inception_v3:\n self.generated_logits.append(latents[\"logits\"])\n with tf.io.gfile.GFile(os.path.join(sample_dir, f\"statistics_{round_num}.npz\"), \"wb\") as f_out:\n io_buffer = io.BytesIO()\n np.savez_compressed(\n io_buffer,\n pool_3=latents[\"pool_3\"],\n logits=latents[\"logits\"],\n )\n\n f_out.write(io_buffer.getvalue())\n\n elif self.feature_extractor.model_name == \"lenet\":\n with tf.io.gfile.GFile(os.path.join(sample_dir, f\"statistics_{round_num}.npz\"), \"wb\") as f_out:\n io_buffer = io.BytesIO()\n np.savez_compressed(io_buffer, pool_3=latents[\"pool_3\"])\n f_out.write(io_buffer.getvalue())" }, { "identifier": "Sampler", "path": "src/functional_diffusion_processes/samplers/base_sampler.py", "snippet": "class Sampler(abc.ABC):\n \"\"\"Abstract base class for creating sampler objects.\n\n This class serves as a template for creating sampler objects which are\n designed to generate samples of a stochastic process governed by a\n specified stochastic differential equation (SDE). The process of sampling\n is carried out by employing specified predictor and corrector methods.\n\n Attributes:\n predictor (Predictor): The predictor method to be used in the sampling process.\n corrector (Corrector): The corrector method to be used in the sampling process.\n sde (SDE): The stochastic differential equation governing the process to be sampled.\n sampler_config (DictConfig): Configuration settings for the sampler.\n\n Methods:\n make_sampler(predict_fn: Callable) -> Callable:\n Abstract method to create a sampling function based on the specified predictor,\n corrector, and SDE.\n \"\"\"\n\n def __init__(self, predictor: Predictor, corrector: Corrector, sde: SDE, sampler_config: DictConfig) -> None:\n \"\"\"Initializes the Sampler object with specified predictor, corrector, SDE, and configuration.\n\n Args:\n predictor (Predictor): The predictor method for the sampler.\n corrector (Corrector): The corrector method for the sampler.\n sde (SDE): The stochastic differential equation governing the process.\n sampler_config (DictConfig): Configuration settings for the sampler.\n \"\"\"\n super().__init__()\n self.predictor = predictor\n self.corrector = corrector\n self.sampler_config = sampler_config\n self.sde = sde\n\n def make_sampler(self, predict_fn: Callable, auxiliary_fn: Union[Any, Callable]) -> Callable:\n \"\"\"Abstract method to create a sampler function.\n\n This method is intended to be overridden by derived classes to provide\n specific implementations for creating a sampler function. 
The sampler\n function will utilize the specified predictor and corrector methods\n along with the provided SDE to generate samples of the stochastic process.\n\n Args:\n predict_fn (Callable): The model prediction function.\n auxiliary_fn (Callable): The auxiliary prediction function for the model.\n\n Returns:\n Callable: The constructed sampling function.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the make_sampler method.\")" }, { "identifier": "SDE", "path": "src/functional_diffusion_processes/sdetools/base_sde.py", "snippet": "class SDE(abc.ABC):\n \"\"\"Abstract base class for representing Stochastic Differential Equations (SDEs).\n\n This class provides a structured way to define and work with SDEs, including computing\n Fourier transforms, discretizing the equations, and defining the drift and diffusion terms.\n\n Attributes:\n sde_config (DictConfig): Configuration object containing SDE settings.\n T (float): Total time duration.\n N (int): Number of time steps.\n eps (float): Small constant for numerical stability.\n is_unidimensional (bool): Flag indicating if the SDE is unidimensional.\n \"\"\"\n\n def __init__(self, sde_config: DictConfig) -> None:\n \"\"\"Initializes the SDE with the given configuration.\n\n Args:\n sde_config (DictConfig): Configuration object containing SDE settings.\n \"\"\"\n super().__init__()\n self.sde_config = sde_config\n self.T = self.sde_config.T\n self.N = self.sde_config.N\n self.eps = self.sde_config.eps\n self.is_unidimensional = True if len(self.sde_config.shape) == 1 else False\n\n def fourier_transform(self, state: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Computes the Fourier transform of the given state.\n\n This method can handle both vectorized and non-vectorized input states.\n\n Args:\n state (jnp.ndarray): State whose Fourier transform is to be computed.\n\n Returns:\n jnp.ndarray: Fourier transform of the given state.\n \"\"\"\n return (\n jnp.fft.fft(state, norm=\"ortho\", axis=1)\n if self.is_unidimensional\n else jnp.fft.fft2(state, norm=\"ortho\", axes=(1, 2))\n )\n\n def inverse_fourier_transform(self, state: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Computes the inverse Fourier transform of the given state.\n\n This method can handle both vectorized and non-vectorized input states.\n\n Args:\n state (jnp.ndarray): State whose inverse Fourier transform is to be computed.\n\n Returns:\n jnp.ndarray: Inverse Fourier transform of the given state.\n \"\"\"\n return (\n jnp.fft.ifft(state, norm=\"ortho\", axis=1)\n if self.is_unidimensional\n else jnp.fft.ifft2(state, norm=\"ortho\", axes=(1, 2))\n )\n\n @abc.abstractmethod\n def sde(\n self,\n y_corrupted: jnp.ndarray,\n t: jnp.ndarray,\n rng: Optional[PRNGKeyArray] = None,\n y_reconstructed: Optional[jnp.ndarray] = None,\n ) -> Tuple[jnp.ndarray, jnp.ndarray]:\n \"\"\"Abstract method to compute the drift and diffusion terms of the SDE.\n\n Args:\n y_corrupted (jnp.ndarray): Corrupted state of the system.\n t (jnp.ndarray): Current time.\n rng (Optional[PRNGKeyArray], optional): Random number generator. Defaults to None.\n y_reconstructed (Optional[jnp.ndarray], optional): Reconstructed state of the system. 
Defaults to None.\n\n Returns:\n Tuple[jnp.ndarray, jnp.ndarray]: Tuple containing the drift and diffusion terms of the SDE.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the sde method.\")\n\n @abc.abstractmethod\n def marginal_prob(\n self,\n rng: PRNGKeyArray,\n x: jnp.ndarray,\n t: jnp.ndarray,\n t0: Optional[jnp.ndarray] = None,\n ) -> Tuple[Any, jnp.ndarray | Any]:\n \"\"\"Computes the marginal probability density at a given time.\n\n This is an abstract method that should be overridden by subclasses to\n compute the marginal probability density based on the state and time.\n\n Args:\n rng (PRNGKeyArray): Random number generator.\n x (jnp.ndarray): State of the system.\n t (jnp.ndarray): Current time.\n t0 (Optional[jnp.ndarray], optional): Initial time. Defaults to None.\n\n Returns:\n Tuple[Any, jnp.ndarray | Any]: Marginal probability density at the given time.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the marginal_prob method.\")\n\n @abc.abstractmethod\n def diffuse(\n self, rng: PRNGKeyArray, x: jnp.ndarray, t: jnp.ndarray, t0: Optional[jnp.ndarray] = None\n ) -> Tuple[jnp.ndarray, jnp.ndarray]:\n \"\"\"Performs diffusion of the input from time t0 to time t.\n\n This is an abstract method that should be overridden by subclasses to\n implement the diffusion process based on the state and time.\n\n Args:\n rng (PRNGKeyArray): Random number generator.\n x (jnp.ndarray): Input state.\n t (jnp.ndarray): Current time.\n t0 (Optional[jnp.ndarray], optional): Initial time. Defaults to None.\n\n Returns:\n Tuple[jnp.ndarray, jnp.ndarray]: Mean of the corrupted input and the corrupted input.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the diffuse method.\")\n\n @abc.abstractmethod\n def prior_sampling(\n self, rng: PRNGKeyArray, shape: Tuple[int, ...], t0: Optional[jnp.ndarray] = None\n ) -> jnp.ndarray:\n \"\"\"Generates a sample from the prior distribution of the SDE.\n\n This is an abstract method that should be overridden by subclasses to\n implement the prior sampling process based on the shape and initial time.\n\n Args:\n rng (PRNGKeyArray): Random number generator.\n shape (Tuple[int, ...]): Shape of the sample to be generated.\n t0 (Optional[jnp.ndarray], optional): Initial time. Defaults to None.\n\n Returns:\n jnp.ndarray: A sample from the prior distribution of the SDE.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the prior_sampling method.\")\n\n @abc.abstractmethod\n def score_fn(\n self, y_corrupted: jnp.ndarray, y_reconstructed: jnp.ndarray, t: jnp.ndarray, rng: Optional[PRNGKeyArray] = None\n ) -> jnp.ndarray:\n \"\"\"Computes the score function based on the corrupted and reconstructed states.\n\n This is an abstract method that should be overridden by subclasses to\n compute the score function based on the state and time.\n\n Args:\n y_corrupted (jnp.ndarray): Corrupted state of the system.\n y_reconstructed (jnp.ndarray): Reconstructed state of the system.\n t (jnp.ndarray): Current time.\n rng (Optional[PRNGKeyArray], optional): Random number generator. 
Defaults to None.\n\n Returns:\n jnp.ndarray: The score function.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the score_fn method.\")\n\n @abc.abstractmethod\n def get_psm(self, t: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Computes the Power-Special-Matrix(PSM) used as a weighting factor for the loss.\n\n This is an abstract method that should be overridden by subclasses to\n compute the state-dependent diffusion matrix based on the time.\n\n Args:\n t (jnp.ndarray): Current time.\n\n Returns:\n jnp.ndarray: The state-dependent diffusion matrix.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the get_psm method.\")\n\n @abc.abstractmethod\n def get_reverse_noise(self, rng: PRNGKeyArray, shape: Tuple[int, ...]) -> jnp.ndarray:\n \"\"\"Generates noise for the reverse SDE.\n\n This is an abstract method that should be overridden by subclasses to\n generate reverse noise based on the shape.\n\n Args:\n rng (PRNGKeyArray): Random number generator.\n shape (Tuple[int, ...]): Shape of the noise to be generated.\n\n Returns:\n jnp.ndarray: The reverse noise.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the get_reverse_noise method.\")\n\n def discretize(\n self,\n y_corrupted: jnp.ndarray,\n t: jnp.ndarray,\n y_reconstructed: Optional[jnp.ndarray] = None,\n ) -> Tuple[jnp.ndarray, jnp.ndarray]:\n \"\"\"Discretizes the SDE into an iterative update rule.\n\n This method computes the discrete drift and diffusion terms based on the continuous SDE.\n\n Args:\n y_corrupted (jnp.ndarray): Corrupted state of the system.\n t (jnp.ndarray): Current time.\n y_reconstructed (Optional[jnp.ndarray], optional): Reconstructed state of the system. Defaults to None.\n\n Returns:\n Tuple[jnp.ndarray, jnp.ndarray]: Tuple containing the discrete drift and diffusion terms.\n \"\"\"\n dt = (self.T - self.eps) / self.N\n drift, diffusion = self.sde(y_corrupted, t, y_reconstructed)\n f = drift * dt\n g = diffusion * jnp.sqrt(dt)\n return f, g\n\n def reverse(self):\n \"\"\"Creates a reverse-time version of the current SDE.\n\n This method defines a nested class for the reverse-time SDE and returns an instance of it.\n\n Returns:\n ReverseSDE: An instance of the reverse-time SDE subclass.\n \"\"\"\n num_time_steps = self.N\n end_t = self.T\n sde_fn = self.sde\n discretize_fn = self.discretize\n score_fn = self.score_fn\n sde_config = self.sde_config\n\n class ReverseSDE(self.__class__, abc.ABC):\n \"\"\"Reverse Stochastic Differential Equation abstract base class.\"\"\"\n\n def __init__(self) -> None:\n \"\"\"Initialize the ReverseSDE class.\n\n Inherits the properties from the original SDE class and overrides the relevant methods for the\n reverse-time SDE.\n \"\"\"\n super().__init__(sde_config)\n self.N = num_time_steps\n self.T = end_t\n self.score_fn = score_fn\n\n def sde(\n self,\n y_corrupted: jnp.ndarray,\n t: jnp.ndarray,\n rng: Optional[PRNGKeyArray] = None,\n y_reconstructed: Optional[jnp.ndarray] = None,\n ) -> Tuple[jnp.ndarray, jnp.ndarray]:\n \"\"\"Return the drift and diffusion terms for the reverse-time SDE.\n\n Args:\n y_corrupted (jnp.ndarray): Corrupted state of the system.\n t (jnp.ndarray): Current time.\n rng (Optional[PRNGKeyArray], optional): Random number generator. 
Defaults to None.\n y_reconstructed (Optional[jnp.ndarray], optional): Reconstructed state of the system. Defaults to None.\n\n Returns:\n Tuple[jnp.ndarray, jnp.ndarray]: Drift and diffusion terms for the reverse-time SDE.\n \"\"\"\n drift, diffusion = sde_fn(y_corrupted, t, y_reconstructed)\n score = self.score_fn(y_corrupted, y_reconstructed, t, rng=rng)\n drift = -drift + batch_mul(diffusion**2, score * (0.5 if self.sde_config.probability_flow else 1.0))\n # Set the diffusion function to zero for ODEs.\n diffusion = jnp.zeros_like(diffusion) if self.sde_config.probability_flow else diffusion\n return drift, diffusion\n\n def discretize(\n self,\n y_corrupted: jnp.ndarray,\n t: jnp.ndarray,\n rng: Optional[PRNGKeyArray] = None,\n y_reconstructed: Optional[jnp.ndarray] = None,\n ) -> Tuple[jnp.ndarray, jnp.ndarray]:\n \"\"\"Discretizes the reverse-time SDE in the form of an iterative update rule.\n\n Args:\n y_corrupted (jnp.ndarray): Corrupted state of the system.\n t (jnp.ndarray): Current time.\n rng (Optional[PRNGKeyArray], optional): Random number generator. Defaults to None.\n y_reconstructed (Optional[jnp.ndarray], optional): Reconstructed state of the system. Defaults to None.\n\n Returns:\n Tuple[jnp.ndarray, jnp.ndarray]: Drift and diffusion terms for the discretized reverse-time SDE.\n \"\"\"\n f, g = discretize_fn(y_corrupted, t, y_corrupted)\n rev_f = -f + batch_mul(\n g**2,\n self.score_fn(y_corrupted, y_reconstructed, t, rng=rng)\n * (0.5 if self.sde_config.probability_flow else 1.0),\n )\n rev_g = jnp.zeros_like(g) if self.sde_config.probability_flow else g\n return rev_f, rev_g\n\n def semi_analytic(\n self,\n y_corrupted: jnp.ndarray,\n t: jnp.ndarray,\n rng: Optional[PRNGKeyArray] = None,\n y_reconstructed: Optional[jnp.ndarray] = None,\n ) -> Tuple[jnp.ndarray, jnp.ndarray]:\n \"\"\"Computes the semi-analytic drift and diffusion terms for the reverse-time SDE.\n\n Args:\n y_corrupted (jnp.ndarray): Corrupted state of the system.\n t (jnp.ndarray): Current time.\n rng (Optional[PRNGKeyArray], optional): Random number generator. Defaults to None.\n y_reconstructed (Optional[jnp.ndarray], optional): Reconstructed state of the system. 
Defaults to None.\n\n Returns:\n Tuple[jnp.ndarray, jnp.ndarray]: Drift and diffusion terms for the semi-analytic reverse-time SDE.\n \"\"\"\n _, diffusion = sde_fn(y_corrupted, t, y_reconstructed)\n score = self.score_fn(y_corrupted, y_reconstructed, t, rng=rng)\n drift = batch_mul(diffusion**2, score * (0.5 if self.sde_config.probability_flow else 1.0))\n diffusion = jnp.zeros_like(diffusion) if self.sde_config.probability_flow else diffusion\n return drift, diffusion\n\n return ReverseSDE()" }, { "identifier": "filter_mask", "path": "src/functional_diffusion_processes/utils/common.py", "snippet": "def filter_mask(shape, radius):\n device_num, batch_size, rows, cols, n_channels = shape\n crow, ccol = int(rows / 2), int(cols / 2)\n center = [crow, ccol]\n x, y = jnp.ogrid[:rows, :cols]\n mask_area = (x - center[0]) ** 2 + (y - center[1]) ** 2 >= radius * radius\n mask = jnp.ones_like(mask_area)\n mask = jnp.where(mask_area, 0, mask)\n mask = mask.reshape(1, 1, rows, cols, 1)\n mask = jnp.repeat(mask, device_num, axis=0)\n mask = jnp.repeat(mask, batch_size, axis=1)\n mask = jnp.repeat(mask, n_channels, axis=4)\n return mask" }, { "identifier": "make_grid_image", "path": "src/functional_diffusion_processes/utils/common.py", "snippet": "def make_grid_image(ndarray: Any, inverse_scaler: Callable, padding: int = 2, pad_value: float = 0.0) -> Any:\n \"\"\"Make a grid image from a Numpy Array.\n\n Args:\n ndarray: The Numpy Array.\n inverse_scaler: The inverse scaler.\n padding: The padding.\n pad_value: The padding value.\n\n Returns:\n The grid image.\n \"\"\"\n ndarray = jnp.asarray(ndarray)\n\n if ndarray.ndim == 4 and ndarray.shape[-1] == 1: # single-channel images\n ndarray = jnp.concatenate((ndarray, ndarray, ndarray), -1)\n\n n_row = int(np.sqrt(ndarray.shape[0]))\n # make the mini-batch of images into a grid\n n_maps = ndarray.shape[0]\n x_maps = min(n_row, n_maps)\n ymaps = int(math.ceil(float(n_maps) / x_maps))\n height, width = int(ndarray.shape[1] + padding), int(ndarray.shape[2] + padding)\n num_channels = ndarray.shape[3]\n grid = np.full((height * ymaps + padding, width * x_maps + padding, num_channels), pad_value).astype(np.float32)\n k = 0\n for y in range(ymaps):\n for x in range(x_maps):\n if k >= n_maps:\n break\n grid[\n y * height + padding : (y + 1) * height,\n x * width + padding : (x + 1) * width,\n ] = ndarray[k]\n k = k + 1\n\n ndarr = inverse_scaler(grid)\n ndarr = jnp.clip(ndarr * 255, 0, 255).astype(jnp.uint8)\n return ndarr" }, { "identifier": "process_images", "path": "src/functional_diffusion_processes/utils/common.py", "snippet": "def process_images(images: Any) -> Any:\n \"\"\"Reshape images to the correct shape.\n\n Args:\n images: Tensor of images to reshape.\n\n Returns:\n A tensor of images with the correct shape.\n \"\"\"\n w = np.sqrt(images.shape[2]).astype(int)\n h = np.sqrt(images.shape[2]).astype(int)\n o = images.shape[3]\n return images.reshape(-1, w, h, o)" }, { "identifier": "save_samples", "path": "src/functional_diffusion_processes/utils/common.py", "snippet": "def save_samples(round_num: int, samples: Any, file_path: str) -> None:\n \"\"\"Save samples to a file.\n\n Args:\n round_num: The round number of the evaluation.\n samples: Tensor of samples to save.\n file_path: string of the Path to the file where the samples will be saved.\n \"\"\"\n for i in range(samples.shape[0]):\n clean_path = os.path.join(file_path, f\"clean/samples_{round_num}_{i}.npy\")\n np.save(clean_path, samples[i])\n samples_path = os.path.join(file_path, 
f\"samples_{round_num}.npz\")\n with tf.io.gfile.GFile(samples_path, \"wb\") as f_out:\n io_buffer = io.BytesIO()\n np.savez_compressed(io_buffer, samples=samples)\n f_out.write(io_buffer.getvalue())" }, { "identifier": "to_grayscale", "path": "src/functional_diffusion_processes/utils/common.py", "snippet": "@jax.pmap\ndef to_grayscale(images):\n weights = np.array([0.2989, 0.5870, 0.1140])[None, None, None, :] # Extend dimensions\n grayscale_images = np.sum(images * weights, axis=-1)\n return grayscale_images" }, { "identifier": "get_data_inverse_scaler", "path": "src/functional_diffusion_processes/utils/scaler.py", "snippet": "def get_data_inverse_scaler(is_centered: bool) -> Callable:\n \"\"\"Inverse data normalizer.\n\n Rescale data to original range at the end of the diffusion.\n\n Args:\n is_centered: boolean if True data will rescaled from [-1, 1] to [0, 1].\n \"\"\"\n if is_centered:\n # Rescale [-1, 1] to [0, 1]\n return lambda x: (x + 1.0) / 2.0\n else:\n return lambda x: x" }, { "identifier": "get_data_scaler", "path": "src/functional_diffusion_processes/utils/scaler.py", "snippet": "def get_data_scaler(is_centered: bool) -> Callable:\n \"\"\"Normalize data. Assume data are always in [0, 1].\n\n Args:\n is_centered: boolean if True data will be centered in [-1, 1].\n \"\"\"\n if is_centered:\n # Rescale to [-1, 1]\n return lambda x: x * 2.0 - 1.0\n else:\n return lambda x: x" }, { "identifier": "TrainState", "path": "src/functional_diffusion_processes/utils/training_state.py", "snippet": "class TrainState(train_state.TrainState):\n \"\"\"The training state for the model.\"\"\"\n\n opt_state_params: Any\n ema_params: Any\n rng: jax.random.PRNGKey" }, { "identifier": "colorizing_fn", "path": "src/functional_diffusion_processes/trainers/helpers.py", "snippet": "def colorizing_fn(\n sample_fn: Callable, carry_state: Tuple, batch_input: jnp.ndarray, gray_scale_img: jnp.ndarray\n) -> Tuple:\n \"\"\"Perform colorizing task on a given grayscale image.\n\n Args:\n sample_fn (Callable): The sampling function used for colorization.\n carry_state (Tuple): The current state of the model.\n batch_input (jnp.ndarray): The input data for colorization.\n gray_scale_img (jnp.ndarray): The grayscale image to be colorized.\n\n Returns:\n Tuple: The updated state and the colorized image.\n \"\"\"\n (rng, state) = carry_state\n return sample_fn(rng, batch_input, state.ema_params, gray_scale_img)" }, { "identifier": "construct_sampling_fn", "path": "src/functional_diffusion_processes/trainers/helpers.py", "snippet": "def construct_sampling_fn(model: flax.linen.Module, sampler: Sampler) -> Callable:\n \"\"\"Construct a sampling function for generating samples from the model.\n\n Args:\n model (flax.linen.Module): The model instance from which to generate samples.\n sampler (Sampler): The sampler instance used for sampling.\n\n Returns:\n Callable: The constructed sampling function.\n \"\"\"\n predict_fn = model.make_predict_fn()\n if isinstance(model, BaseMAML):\n super_resolution_fn = model.make_super_resolution_fn()\n sample_fn = sampler.make_sampler(predict_fn, super_resolution_fn)\n else:\n sample_fn = sampler.make_sampler(predict_fn, None)\n return sample_fn" }, { "identifier": "construct_train_step", "path": "src/functional_diffusion_processes/trainers/helpers.py", "snippet": "def construct_train_step(optimizer, loss_fn) -> Callable:\n \"\"\"Construct a train step function to be used in the training loop.\n\n This function creates a training step function which, when called, performs\n a single 
step of training including forward pass, loss computation, and\n backward pass for gradient computation and updates.\n\n Args:\n optimizer: The optimizer instance used for updating model parameters.\n loss_fn: The loss function used for computing the loss.\n\n Returns:\n Callable: The constructed train step function.\n \"\"\"\n\n @partial(jax.pmap, axis_name=\"device\")\n def train_fn(\n rng,\n params,\n optim_params,\n step,\n batch_input,\n batch,\n ):\n grad_params, (new_rng, loss, loss_inner, batch_reconstructed, batch_corrupted, target) = loss_fn(\n rng, params, step, batch_input, batch\n )\n\n loss = jax.lax.pmean(loss, axis_name=\"device\")\n grad_params = jax.lax.pmean(grad_params, axis_name=\"device\")\n\n updates, optim_params = optimizer.update(grad_params, optim_params, params)\n\n params = optax.apply_updates(params, updates)\n params = clip_learning_rates(params)\n return new_rng, loss, loss_inner, params, optim_params, batch_reconstructed, batch_corrupted, target\n\n return train_fn" }, { "identifier": "inpainting_fn", "path": "src/functional_diffusion_processes/trainers/helpers.py", "snippet": "def inpainting_fn(\n sample_fn: Callable, carry_state: Tuple, batch_input: jnp.ndarray, image: jnp.ndarray, mask: jnp.ndarray\n) -> Tuple:\n \"\"\"Perform inpainting task on a given image using a mask.\n\n Args:\n sample_fn (Callable): The sampling function used for inpainting.\n carry_state (Tuple): The current state of the model.\n batch_input (jnp.ndarray): The input data for inpainting.\n image (jnp.ndarray): The image to be inpainted.\n mask (jnp.ndarray): The mask used for inpainting.\n\n Returns:\n Tuple: The updated state and the inpainted image.\n \"\"\"\n (rng, state) = carry_state\n return sample_fn(rng, batch_input, state.ema_params, image, mask)" }, { "identifier": "sampling_fn", "path": "src/functional_diffusion_processes/trainers/helpers.py", "snippet": "def sampling_fn(sample_fn: Callable, carry_state: Tuple, batch_input: jnp.ndarray) -> Tuple:\n \"\"\"Perform sampling task using a given sampling function.\n\n Args:\n sample_fn (Callable): The sampling function.\n carry_state (Tuple): The current state of the model.\n batch_input (jnp.ndarray): The input data for sampling.\n\n Returns:\n Tuple: The updated state after performing the sampling.\n \"\"\"\n (rng, state) = carry_state\n return sample_fn(rng, batch_input, state.ema_params)" } ]
import abc
import gc
import io
import logging
import os
import flax
import flax.jax_utils as flax_utils
import hydra.utils
import jax
import numpy as np
import tensorflow as tf
import wandb
from typing import Any, Callable, Tuple, Union
from cleanfid import fid
from flax import linen, traverse_util
from flax.training import checkpoints
from flax.training.checkpoints import restore_checkpoint
from jax import numpy as jnp
from omegaconf import DictConfig, OmegaConf
from tqdm.auto import tqdm
from wandb.sdk.lib import RunDisabled
from wandb.sdk.wandb_run import Run
from ..datasets import AudioDataset, ImageDataset
from ..datasets.base_dataset import BaseDataset
from ..losses.base_loss import Loss
from ..metrics import FIDMetric
from ..samplers import Sampler
from ..sdetools.base_sde import SDE
from ..utils.common import filter_mask, make_grid_image, process_images, save_samples, to_grayscale
from ..utils.scaler import get_data_inverse_scaler, get_data_scaler
from ..utils.training_state import TrainState
from .helpers import colorizing_fn, construct_sampling_fn, construct_train_step, inpainting_fn, sampling_fn
12,975
# import imageio # import imageio pylogger = logging.getLogger(__name__) class Trainer(abc.ABC): """Class for training a model.""" def __init__( self, mode: str, model_name: str, training_config: DictConfig, optimizer, evaluation_config: DictConfig, trainer_logging: DictConfig, sampler: Sampler, loss_obj: Loss, ) -> None: """Initialize a Trainer instance with configurations and core components. Args: mode (str): Specifies the mode of the trainer which can be either "train" or "eval". model_name (str): The name identifier for the model. training_config (DictConfig): A configuration dictionary for training settings. optimizer: The optimizer instance used for training. evaluation_config (DictConfig): A configuration dictionary for evaluation settings. trainer_logging (DictConfig): A configuration dictionary for logging settings. sampler (Sampler): A sampler instance for sampling from the model. loss_obj (Loss): A loss object used for computing the loss during training. """ self.mode = mode self.model_name = model_name self.training_config = training_config self.optimizer = hydra.utils.instantiate(optimizer) self.evaluation_config = evaluation_config self.logging = trainer_logging self.sampler = sampler self.loss_obj = loss_obj self.checkpoint_dir = os.path.join(self.training_config.save_dir, self.training_config.checkpoint_dir) self.sample_dir = os.path.join(self.training_config.save_dir, self.training_config.sample_dir) self.eval_dir = os.path.join(self.training_config.save_dir, self.evaluation_config.eval_dir) # Create the directories for saving samples and checkpoints tf.io.gfile.makedirs(self.checkpoint_dir) tf.io.gfile.makedirs(self.sample_dir) tf.io.gfile.makedirs(self.eval_dir) tf.io.gfile.makedirs(os.path.join(self.eval_dir, "clean")) def initialize_wandb( self, dataset_config: DictConfig, sde_config: DictConfig, model_config: DictConfig ) -> Union[Run, RunDisabled, None]: """Initialize wandb if logging is enabled.""" if self.logging.use_wandb: run = wandb.init( name=os.path.basename(self.logging.wandb_init.name), project=self.logging.wandb_init.project, entity=self.logging.wandb_init.entity, save_code=self.logging.wandb_init.save_code, config={ **self.training_config, **dataset_config, **sde_config, **model_config, }, ) else: run = None return run def initialize_run(self, model, ds_train, sde): """Perform all initialization steps required for training.""" run = self.initialize_wandb(ds_train.data_config, sde.sde_config, model.model_config) scaler = get_data_scaler(is_centered=ds_train.data_config.data_centered) inverse_scaler = get_data_inverse_scaler(is_centered=ds_train.data_config.data_centered) rng = jax.random.PRNGKey(seed=self.training_config.seed) rng, step_rng = jax.random.split(rng) batch_input = model.initialize_input( (ds_train.data_config.batch_size, *sde.sde_config.shape, ds_train.data_config.output_size) ) params = jax.jit(model.initialize_model, backend="cpu")(step_rng, batch_input) flat_params = traverse_util.flatten_dict(params).values() tot_params = sum([jnp.size(p) for p in flat_params]) pylogger.info("Total number of parameters: {:.2f}M".format(tot_params / 1e6))
# import imageio # import imageio pylogger = logging.getLogger(__name__) class Trainer(abc.ABC): """Class for training a model.""" def __init__( self, mode: str, model_name: str, training_config: DictConfig, optimizer, evaluation_config: DictConfig, trainer_logging: DictConfig, sampler: Sampler, loss_obj: Loss, ) -> None: """Initialize a Trainer instance with configurations and core components. Args: mode (str): Specifies the mode of the trainer which can be either "train" or "eval". model_name (str): The name identifier for the model. training_config (DictConfig): A configuration dictionary for training settings. optimizer: The optimizer instance used for training. evaluation_config (DictConfig): A configuration dictionary for evaluation settings. trainer_logging (DictConfig): A configuration dictionary for logging settings. sampler (Sampler): A sampler instance for sampling from the model. loss_obj (Loss): A loss object used for computing the loss during training. """ self.mode = mode self.model_name = model_name self.training_config = training_config self.optimizer = hydra.utils.instantiate(optimizer) self.evaluation_config = evaluation_config self.logging = trainer_logging self.sampler = sampler self.loss_obj = loss_obj self.checkpoint_dir = os.path.join(self.training_config.save_dir, self.training_config.checkpoint_dir) self.sample_dir = os.path.join(self.training_config.save_dir, self.training_config.sample_dir) self.eval_dir = os.path.join(self.training_config.save_dir, self.evaluation_config.eval_dir) # Create the directories for saving samples and checkpoints tf.io.gfile.makedirs(self.checkpoint_dir) tf.io.gfile.makedirs(self.sample_dir) tf.io.gfile.makedirs(self.eval_dir) tf.io.gfile.makedirs(os.path.join(self.eval_dir, "clean")) def initialize_wandb( self, dataset_config: DictConfig, sde_config: DictConfig, model_config: DictConfig ) -> Union[Run, RunDisabled, None]: """Initialize wandb if logging is enabled.""" if self.logging.use_wandb: run = wandb.init( name=os.path.basename(self.logging.wandb_init.name), project=self.logging.wandb_init.project, entity=self.logging.wandb_init.entity, save_code=self.logging.wandb_init.save_code, config={ **self.training_config, **dataset_config, **sde_config, **model_config, }, ) else: run = None return run def initialize_run(self, model, ds_train, sde): """Perform all initialization steps required for training.""" run = self.initialize_wandb(ds_train.data_config, sde.sde_config, model.model_config) scaler = get_data_scaler(is_centered=ds_train.data_config.data_centered) inverse_scaler = get_data_inverse_scaler(is_centered=ds_train.data_config.data_centered) rng = jax.random.PRNGKey(seed=self.training_config.seed) rng, step_rng = jax.random.split(rng) batch_input = model.initialize_input( (ds_train.data_config.batch_size, *sde.sde_config.shape, ds_train.data_config.output_size) ) params = jax.jit(model.initialize_model, backend="cpu")(step_rng, batch_input) flat_params = traverse_util.flatten_dict(params).values() tot_params = sum([jnp.size(p) for p in flat_params]) pylogger.info("Total number of parameters: {:.2f}M".format(tot_params / 1e6))
state = TrainState.create(
14
2023-10-24 22:01:35+00:00
16k
KosinskiLab/pyTME
tme/tests/test_structure.py
[ { "identifier": "Structure", "path": "tme/structure.py", "snippet": "class Structure:\n \"\"\"Represents atomic structures in accordance with the Protein Data Bank (PDB)\n format specification.\n\n Attributes\n ----------\n record_type : NDArray\n Type of the record, e.g., ATOM, HETATM. Array shape = (n,)\n atom_serial_number : NDArray\n Serial number assigned to each atom. Array shape = (n,)\n atom_name : NDArray\n Standardized names for each atom. Array shape = (n,)\n atom_coordinate : NDArray\n The 3D Cartesian coordinates of each atom in x, y, z. Array shape = (n,3 )\n alternate_location_indicator : NDArray\n Indicator for alternate locations of an atom if it exists in multiple places.\n Array shape = (n,)\n residue_name : NDArray\n Standard residue names where each atom belongs. Array shape = (n,)\n chain_identifier : NDArray\n Identifier for the chain where each atom is located. Array shape = (n,)\n residue_sequence_number : NDArray\n Sequence number of the residue in the protein chain for each atom.\n Array shape = (n,)\n code_for_residue_insertion : NDArray\n Code to denote any residue insertion. Array shape = (n,)\n occupancy : NDArray\n Occupancy factor of each atom, indicating the fraction of time the atom\n is located at its position. Array shape = (n,)\n temperature_factor : NDArray\n Measure of the atomic displacement or B-factor for each atom. Array shape = (n,)\n segment_identifier : NDArray\n Identifier for the segment where each atom belongs. Array shape = (n,)\n element_symbol : NDArray\n Atomic element symbol for each atom. Array shape = (n,)\n charge : NDArray\n Charge on the atom. Array shape = (n,)\n details : dict\n Any additional or auxiliary details. Array shape = (n,)\n\n References\n ----------\n .. [1] https://www.cgl.ucsf.edu/chimera/docs/UsersGuide/tutorials/pdbintro.html\n \"\"\"\n\n #: Return a numpy array with record types, e.g. 
ATOM, HETATM.\n record_type: NDArray\n\n #: Return a numpy array with serial number of each atom.\n atom_serial_number: NDArray\n\n #: Return a numpy array with name of each atom.\n atom_name: NDArray\n\n #: Return a numpy array with coordinates of each atom in x, y, z.\n atom_coordinate: NDArray\n\n #: Return a numpy array with alternate location indicates of each atom.\n alternate_location_indicator: NDArray\n\n #: Return a numpy array with originating residue names of each atom.\n residue_name: NDArray\n\n #: Return a numpy array with originating structure chain of each atom.\n chain_identifier: NDArray\n\n #: Return a numpy array with originating residue id of each atom.\n residue_sequence_number: NDArray\n\n #: Return a numpy array with insertion information d of each atom.\n code_for_residue_insertion: NDArray\n\n #: Return a numpy array with occupancy factors of each atom.\n occupancy: NDArray\n\n #: Return a numpy array with B-factors for each atom.\n temperature_factor: NDArray\n\n #: Return a numpy array with segment identifier for each atom.\n segment_identifier: NDArray\n\n #: Return a numpy array with element symbols of each atom.\n element_symbol: NDArray\n\n #: Return a numpy array with charges of each atom.\n charge: NDArray\n\n #: Returns a dictionary with class instance metadata.\n details: dict\n\n def __post_init__(self, *args, **kwargs):\n \"\"\"\n Initialize the structure and populate header details.\n\n Raises\n ------\n ValueError\n If other NDArray attributes to not match the number of atoms.\n If the shape of atom_coordinates and chain_identifier doesn't match.\n \"\"\"\n self._elements = Elements()\n self.details = self._populate_details(self.details)\n\n n_atoms = self.atom_coordinate.shape[0]\n for attribute in self.__dict__:\n value = getattr(self, attribute)\n if type(value) != np.ndarray:\n continue\n if value.shape[0] != n_atoms:\n raise ValueError(\n f\"Expected shape of {attribute}: {n_atoms}, got {value.shape[0]}.\"\n )\n\n def __getitem__(self, indices: List[int]) -> \"Structure\":\n \"\"\"\n Get a Structure instance for specified indices.\n\n Parameters\n ----------\n indices : Union[int, bool, NDArray]\n The indices to get.\n\n Returns\n -------\n Structure\n The Structure instance for the given indices.\n \"\"\"\n if type(indices) in (int, bool):\n indices = (indices,)\n\n indices = np.asarray(indices)\n attributes = (\n \"record_type\",\n \"atom_serial_number\",\n \"atom_name\",\n \"atom_coordinate\",\n \"alternate_location_indicator\",\n \"residue_name\",\n \"chain_identifier\",\n \"residue_sequence_number\",\n \"code_for_residue_insertion\",\n \"occupancy\",\n \"temperature_factor\",\n \"segment_identifier\",\n \"element_symbol\",\n \"charge\",\n )\n kwargs = {attr: getattr(self, attr)[indices] for attr in attributes}\n ret = self.__class__(**kwargs, details={})\n return ret\n\n def __repr__(self):\n \"\"\"\n Return a string representation of the Structure.\n\n Returns\n -------\n str\n The string representation.\n \"\"\"\n unique_chains = \"-\".join(\n [\n \",\".join([str(x) for x in entity])\n for entity in self.details[\"unique_chains\"]\n ]\n )\n min_atom = np.min(self.atom_serial_number)\n max_atom = np.max(self.atom_serial_number)\n n_atom = self.atom_serial_number.size\n\n min_residue = np.min(self.residue_sequence_number)\n max_residue = np.max(self.residue_sequence_number)\n n_residue = self.residue_sequence_number.size\n\n repr_str = (\n f\"Structure object at {id(self)}\\n\"\n f\"Unique Chains: {unique_chains}, \"\n f\"Atom Range: 
{min_atom}-{max_atom} [N = {n_atom}], \"\n f\"Residue Range: {min_residue}-{max_residue} [N = {n_residue}]\"\n )\n return repr_str\n\n def get_chains(self) -> List[str]:\n \"\"\"\n Returns a list of available chains.\n\n Returns\n -------\n list\n The list of available chains.\n \"\"\"\n return list(self.details[\"chain_weight\"].keys())\n\n def copy(self) -> \"Structure\":\n \"\"\"\n Returns a copy of the Structure instance.\n\n Returns\n -------\n Structure\n The copied Structure instance.\n \"\"\"\n return deepcopy(self)\n\n def _populate_details(self, details: Dict = {}) -> Dict:\n \"\"\"\n Populate the details dictionary with the data from the Structure instance.\n\n Parameters\n ----------\n details : dict, optional\n The initial details dictionary, by default {}.\n\n Returns\n -------\n dict\n The populated details dictionary.\n \"\"\"\n details[\"weight\"] = np.sum(\n [self._elements[atype].atomic_weight for atype in self.element_symbol]\n )\n\n label, idx, chain = np.unique(\n self.chain_identifier, return_inverse=True, return_index=True\n )\n chain_weight = np.bincount(\n chain,\n [self._elements[atype].atomic_weight for atype in self.element_symbol],\n )\n labels = self.chain_identifier[idx]\n details[\"chain_weight\"] = {key: val for key, val in zip(labels, chain_weight)}\n\n # Group non-unique chains in separate lists in details[\"unique_chains\"]\n details[\"unique_chains\"], temp = [], {}\n for chain_label in label:\n index = len(details[\"unique_chains\"])\n chain_sequence = \"\".join(\n [\n str(y)\n for y in self.element_symbol[\n np.where(self.chain_identifier == chain_label)\n ]\n ]\n )\n if chain_sequence not in temp:\n temp[chain_sequence] = index\n details[\"unique_chains\"].append([chain_label])\n continue\n idx = temp.get(chain_sequence)\n details[\"unique_chains\"][idx].append(chain_label)\n\n filtered_data = [\n (label, integer)\n for label, integer in zip(\n self.chain_identifier, self.residue_sequence_number\n )\n ]\n filtered_data = sorted(filtered_data, key=lambda x: x[0])\n details[\"chain_range\"] = {}\n for label, values in groupby(filtered_data, key=lambda x: x[0]):\n values = [int(x[1]) for x in values]\n details[\"chain_range\"][label] = (min(values), max(values))\n\n return details\n\n @classmethod\n def from_file(\n cls,\n filename: str,\n keep_non_atom_records: bool = False,\n filter_by_elements: set = None,\n filter_by_residues: set = None,\n ) -> \"Structure\":\n \"\"\"\n Reads in an mmcif or pdb file and converts it into class instance.\n\n Parameters\n ----------\n filename : str\n Path to the mmcif or pdb file.\n keep_non_atom_records : bool, optional\n Wheter to keep residues that are not labelled ATOM.\n filter_by_elements: set, optional\n Which elements to keep. Default corresponds to all elements.\n filter_by_residues: set, optional\n Which residues to keep. 
Default corresponds to all residues.\n\n Raises\n ------\n ValueError\n If the extension is not '.pdb' or '.cif'.\n\n Returns\n -------\n Structure\n Read in structure file.\n \"\"\"\n _, file_extension = splitext(basename(filename.upper()))\n if file_extension == \".PDB\":\n func = cls._load_pdb\n elif file_extension == \".CIF\":\n func = cls._load_mmcif\n else:\n raise NotImplementedError(\n \"Could not determine structure filetype from extension.\"\n \" Supported filetypes are mmcif (.cif) and pdb (.pdb).\"\n )\n data = func(filename)\n\n keep = np.ones(data[\"element_symbol\"].size, dtype=bool)\n if filter_by_elements:\n keep = np.logical_and(\n keep,\n np.in1d(data[\"element_symbol\"], np.array(list(filter_by_elements))),\n )\n if filter_by_residues:\n keep = np.logical_and(\n keep, np.in1d(data[\"residue_name\"], np.array(list(filter_by_residues)))\n )\n if not keep_non_atom_records:\n keep = np.logical_and(keep, data[\"record_type\"] == \"ATOM\")\n\n for key in data:\n if key == \"details\":\n continue\n if type(data[key]) == np.ndarray:\n data[key] = data[key][keep]\n else:\n data[key] = [x for x, flag in zip(data[key], keep) if flag]\n\n data[\"details\"][\"filepath\"] = filename\n\n return cls(**data)\n\n @staticmethod\n def _load_mmcif(filename: str) -> Dict:\n \"\"\"\n Parses a macromolecular Crystallographic Information File (mmCIF)\n and returns the data in a dictionary format.\n\n Parameters\n ----------\n filename : str\n The filename of the mmCIF to load.\n\n Returns\n -------\n dict\n A dictionary of numpy arrays. Keys are the names of the PDB\n coordinate section. In addition, some details about the parsed\n structure are included. In case of conversion failure, the failing\n attribute is set to 0 if its supposed to be an integer value.\n \"\"\"\n result = MMCIFParser(filename)\n\n atom_site_mapping = {\n \"record_type\": (\"group_PDB\", str),\n \"atom_serial_number\": (\"id\", int),\n \"atom_name\": (\"label_atom_id\", str),\n \"alternate_location_indicator\": (\"label_alt_id\", str),\n \"residue_name\": (\"label_comp_id\", str),\n # \"chain_identifier\": (\"auth_asym_id\", str),\n \"chain_identifier\": (\"label_asym_id\", str),\n \"residue_sequence_number\": (\"label_seq_id\", int),\n \"code_for_residue_insertion\": (\"pdbx_PDB_ins_code\", str),\n \"occupancy\": (\"occupancy\", float),\n \"temperature_factor\": (\"B_iso_or_equiv\", float),\n \"segment_identifier\": (\"pdbx_PDB_model_num\", str),\n \"element_symbol\": (\"type_symbol\", str),\n \"charge\": (\"pdbx_formal_charge\", str),\n }\n\n out = {}\n for out_key, (atom_site_key, dtype) in atom_site_mapping.items():\n out_data = [\n x.strip() for x in result[\"atom_site\"].get(atom_site_key, [\".\"])\n ]\n if dtype == int:\n out_data = [0 if x == \".\" else int(x) for x in out_data]\n try:\n out[out_key] = np.asarray(out_data).astype(dtype)\n except ValueError:\n default = [\".\"] if dtype == str else 0\n print(f\"Converting {out_key} to {dtype} failed, set to {default}.\")\n out[out_key] = np.repeat(default, len(out_data)).astype(dtype)\n\n number_entries = len(max(out.values(), key=len))\n for key, value in out.items():\n if value.size != 1:\n continue\n out[key] = np.repeat(value, number_entries // value.size)\n\n out[\"details\"] = {}\n out[\"atom_coordinate\"] = np.transpose(\n np.array(\n [\n result[\"atom_site\"][\"Cartn_x\"],\n result[\"atom_site\"][\"Cartn_y\"],\n result[\"atom_site\"][\"Cartn_z\"],\n ],\n dtype=np.float32,\n )\n )\n\n detail_mapping = {\n \"resolution\": (\"em_3d_reconstruction\", 
\"resolution\", np.nan),\n \"resolution_method\": (\"em_3d_reconstruction\", \"resolution_method\", np.nan),\n \"method\": (\"exptl\", \"method\", np.nan),\n \"electron_source\": (\"em_imaging\", \"electron_source\", np.nan),\n \"illumination_mode\": (\"em_imaging\", \"illumination_mode\", np.nan),\n \"microscope_model\": (\"em_imaging\", \"microscope_model\", np.nan),\n }\n for out_key, (base_key, inner_key, default) in detail_mapping.items():\n if base_key not in result:\n continue\n out[\"details\"][out_key] = result[base_key].get(inner_key, default)\n\n return out\n\n @staticmethod\n def _load_pdb(filename: str) -> Dict:\n \"\"\"\n Parses a Protein Data Bank (PDB) file and returns the data\n in a dictionary format.\n\n Parameters\n ----------\n filename : str\n The filename of the PDB file to load.\n\n Returns\n -------\n dict\n A dictionary of numpy arrays. Keys are the names of the PDB\n coordinate section. In addition, some details about the parsed\n structure are included. In case of conversion failure, the failing\n attribute is set to 0 if its supposed to be an integer value.\n \"\"\"\n result = PDBParser(filename)\n\n atom_site_mapping = {\n \"record_type\": (\"record_type\", str),\n \"atom_serial_number\": (\"atom_serial_number\", int),\n \"atom_name\": (\"atom_name\", str),\n \"alternate_location_indicator\": (\"alternate_location_indicator\", str),\n \"residue_name\": (\"residue_name\", str),\n \"chain_identifier\": (\"chain_identifier\", str),\n \"residue_sequence_number\": (\"residue_sequence_number\", int),\n \"code_for_residue_insertion\": (\"code_for_residue_insertion\", str),\n \"occupancy\": (\"occupancy\", float),\n \"temperature_factor\": (\"temperature_factor\", float),\n \"segment_identifier\": (\"segment_identifier\", str),\n \"element_symbol\": (\"element_symbol\", str),\n \"charge\": (\"charge\", str),\n }\n\n out = {\"details\": result[\"details\"]}\n for out_key, (inner_key, dtype) in atom_site_mapping.items():\n out_data = [x.strip() for x in result[inner_key]]\n if dtype == int:\n out_data = [0 if x == \".\" else int(x) for x in out_data]\n try:\n out[out_key] = np.asarray(out_data).astype(dtype)\n except ValueError:\n default = \".\" if dtype == str else 0\n print(\n f\"Converting {out_key} to {dtype} failed. Setting {out_key} to {default}.\"\n )\n out[out_key] = np.repeat(default, len(out_data)).astype(dtype)\n\n out[\"atom_coordinate\"] = np.array(result[\"atom_coordinate\"], dtype=np.float32)\n\n return out\n\n def to_file(self, filename: str) -> None:\n \"\"\"\n Writes the Structure instance data to a Protein Data Bank (PDB) or\n macromolecular Crystallographic Information File (mmCIF) file depending\n one whether filename ends with '.pdb' or '.cif'.\n\n Raises\n ------\n ValueError\n If the extension is not '.pdb' or '.cif'.\n\n Parameters\n ----------\n filename : str\n The filename of the file to write.\n \"\"\"\n data_out = []\n if np.any(np.vectorize(len)(self.chain_identifier) > 2):\n warnings.warn(\"Chain identifiers longer than one will be shortened.\")\n\n _, file_extension = splitext(basename(filename.upper()))\n if file_extension == \".PDB\":\n func = self._write_pdb\n elif file_extension == \".CIF\":\n func = self._write_mmcif\n else:\n raise NotImplementedError(\n \"Could not determine structure filetype.\"\n \" Supported filetypes are mmcif (.cif) and pdb (.pdb).\"\n )\n\n if self.atom_coordinate.shape[0] > 10**5 and func == self._write_pdb:\n warnings.warn(\n \"The structure contains more than 100,000 atoms. 
Consider using mmcif.\"\n )\n\n with open(filename, mode=\"w\", encoding=\"utf-8\") as ofile:\n ofile.writelines(func())\n\n def _write_pdb(self) -> List[str]:\n \"\"\"\n Returns a PDB string representation of the structure instance.\n\n Returns\n -------\n list\n List containing PDB file coordine lines.\n \"\"\"\n data_out = []\n for index in range(self.atom_coordinate.shape[0]):\n x, y, z = self.atom_coordinate[index, :]\n line = list(\" \" * 80)\n line[0:6] = f\"{self.record_type[index]:<6}\"\n line[6:11] = f\"{self.atom_serial_number[index]:>5}\"\n line[12:16] = f\"{self.atom_name[index]:<4}\"\n line[16] = f\"{self.alternate_location_indicator[index]:<1}\"\n line[17:20] = f\"{self.residue_name[index]:<3}\"\n line[21] = f\"{self.chain_identifier[index][0]:<1}\"\n line[22:26] = f\"{self.residue_sequence_number[index]:>4}\"\n line[26] = f\"{self.code_for_residue_insertion[index]:<1}\"\n line[30:38] = f\"{x:>8.3f}\"\n line[38:46] = f\"{y:>8.3f}\"\n line[46:54] = f\"{z:>8.3f}\"\n line[54:60] = f\"{self.occupancy[index]:>6.2f}\"\n line[60:66] = f\"{self.temperature_factor[index]:>6.2f}\"\n line[72:76] = f\"{self.segment_identifier[index]:>4}\"\n line[76:78] = f\"{self.element_symbol[index]:<2}\"\n line[78:80] = f\"{self.charge[index]:>2}\"\n data_out.append(\"\".join(line))\n data_out.append(\"END\")\n data_out = \"\\n\".join(data_out)\n return data_out\n\n def _write_mmcif(self) -> List[str]:\n \"\"\"\n Returns a MMCIF string representation of the structure instance.\n\n Returns\n -------\n list\n List containing MMCIF file coordinate lines.\n \"\"\"\n model_num, entity_id = 1, 1\n data = {\n \"group_PDB\": [],\n \"id\": [],\n \"type_symbol\": [],\n \"label_atom_id\": [],\n \"label_alt_id\": [],\n \"label_comp_id\": [],\n \"label_asym_id\": [],\n \"label_entity_id\": [],\n \"label_seq_id\": [],\n \"pdbx_PDB_ins_code\": [],\n \"Cartn_x\": [],\n \"Cartn_y\": [],\n \"Cartn_z\": [],\n \"occupancy\": [],\n \"B_iso_or_equiv\": [],\n \"pdbx_formal_charge\": [],\n \"auth_seq_id\": [],\n \"auth_comp_id\": [],\n \"auth_asym_id\": [],\n \"auth_atom_id\": [],\n \"pdbx_PDB_model_num\": [],\n }\n\n for index in range(self.atom_coordinate.shape[0]):\n x, y, z = self.atom_coordinate[index, :]\n data[\"group_PDB\"].append(self.record_type[index])\n data[\"id\"].append(str(self.atom_serial_number[index]))\n data[\"type_symbol\"].append(self.element_symbol[index])\n data[\"label_atom_id\"].append(self.atom_name[index])\n data[\"label_alt_id\"].append(self.alternate_location_indicator[index])\n data[\"label_comp_id\"].append(self.residue_name[index])\n data[\"label_asym_id\"].append(self.chain_identifier[index][0])\n data[\"label_entity_id\"].append(str(entity_id))\n data[\"label_seq_id\"].append(str(self.residue_sequence_number[index]))\n data[\"pdbx_PDB_ins_code\"].append(self.code_for_residue_insertion[index])\n data[\"Cartn_x\"].append(f\"{x:.3f}\")\n data[\"Cartn_y\"].append(f\"{y:.3f}\")\n data[\"Cartn_z\"].append(f\"{z:.3f}\")\n data[\"occupancy\"].append(f\"{self.occupancy[index]:.2f}\")\n data[\"B_iso_or_equiv\"].append(f\"{self.temperature_factor[index]:.2f}\")\n data[\"pdbx_formal_charge\"].append(self.charge[index])\n data[\"auth_seq_id\"].append(str(self.residue_sequence_number[index]))\n data[\"auth_comp_id\"].append(self.residue_name[index])\n data[\"auth_asym_id\"].append(self.chain_identifier[index][0])\n data[\"auth_atom_id\"].append(self.atom_name[index])\n data[\"pdbx_PDB_model_num\"].append(str(model_num))\n\n output_data = {\"atom_site\": data}\n original_file = 
self.details.get(\"filepath\", \"\")\n try:\n new_data = {k: v for k, v in MMCIFParser(original_file).items()}\n index = self.atom_serial_number - 1\n new_data[\"atom_site\"] = {\n k: [v[i] for i in index] for k, v in new_data[\"atom_site\"].items()\n }\n new_data[\"atom_site\"][\"Cartn_x\"] = data[\"Cartn_x\"]\n new_data[\"atom_site\"][\"Cartn_y\"] = data[\"Cartn_y\"]\n new_data[\"atom_site\"][\"Cartn_z\"] = data[\"Cartn_z\"]\n output_data = new_data\n except Exception:\n pass\n\n ret = \"\"\n for category, subdict in output_data.items():\n ret += \"#\\n\"\n is_loop = isinstance(subdict[list(subdict.keys())[0]], list)\n if not is_loop:\n for k in subdict:\n ret += f\"_{category}.{k}\\t{subdict[k]}\\n\"\n else:\n ret += \"loop_\\n\"\n ret += \"\".join([f\"_{category}.{k}\\n\" for k in subdict])\n padded_subdict = _format_mmcif_colunns(subdict)\n\n data = [\n \"\".join([str(x) for x in content])\n for content in zip(*padded_subdict.values())\n ]\n ret += \"\\n\".join([entry for entry in data]) + \"\\n\"\n\n return ret\n\n def subset_by_chain(self, chain: str = None) -> \"Structure\":\n \"\"\"\n Return a subset of the structure that contains only atoms belonging to\n a specific chain. If no chain is specified, all chains are returned.\n\n Parameters\n ----------\n chain : str, optional\n The chain identifier. If multiple chains should be selected they need\n to be a comma separated string, e.g. 'A,B,CE'. If chain None,\n all chains are returned. Default is None.\n\n Returns\n -------\n Structure\n A subset of the original structure containing only the specified chain.\n \"\"\"\n chain = np.unique(self.chain_identifier) if chain is None else chain.split(\",\")\n keep = np.in1d(self.chain_identifier, chain)\n return self[keep]\n\n def subset_by_range(\n self,\n start: int,\n stop: int,\n chain: str = None,\n ) -> \"Structure\":\n \"\"\"\n Return a subset of the structure within a specific range of residues.\n\n Parameters\n ----------\n start : int\n The starting residue sequence number.\n\n stop : int\n The ending residue sequence number.\n\n chain : str, optional\n The chain identifier. If multiple chains should be selected they need\n to be a comma separated string, e.g. 'A,B,CE'. If chain None,\n all chains are returned. 
Default is None.\n\n Returns\n -------\n Structure\n A subset of the original structure within the specified residue range.\n \"\"\"\n ret = self.subset_by_chain(chain=chain)\n keep = np.logical_and(\n ret.residue_sequence_number >= start, ret.residue_sequence_number <= stop\n )\n return ret[keep]\n\n def center_of_mass(self) -> NDArray:\n \"\"\"\n Calculate the center of mass of the structure.\n\n Returns\n -------\n NDArray\n The center of mass of the structure.\n \"\"\"\n weights = [self._elements[atype].atomic_weight for atype in self.element_symbol]\n return np.dot(self.atom_coordinate.T, weights) / np.sum(weights)\n\n def rigid_transform(\n self,\n rotation_matrix: NDArray,\n translation: NDArray,\n use_geometric_center: bool = False,\n ) -> \"Structure\":\n \"\"\"\n Performs a rigid transform of internal structure coordinates.\n\n Parameters\n ----------\n rotation_matrix : NDArray\n The rotation matrix to apply to the coordinates.\n translation : NDArray\n The vector to translate the coordinates by.\n use_geometric_center : bool, optional\n Whether to use geometric or coordinate center.\n\n Returns\n -------\n Structure\n The transformed instance of :py:class:`tme.structure.Structure`.\n \"\"\"\n out = np.empty_like(self.atom_coordinate.T)\n rigid_transform(\n coordinates=self.atom_coordinate.T,\n rotation_matrix=rotation_matrix,\n translation=translation,\n out=out,\n use_geometric_center=use_geometric_center,\n )\n ret = self.copy()\n ret.atom_coordinate = out.T.copy()\n return ret\n\n def centered(self) -> Tuple[\"Structure\", NDArray]:\n \"\"\"\n Shifts the structure analogous to :py:meth:`tme.density.Density.centered`.\n\n Returns\n -------\n Structure\n A copy of the class instance whose data center of mass is in the\n center of the data array.\n NDArray\n The coordinate translation.\n\n See Also\n --------\n :py:meth:`tme.Density.centered`\n \"\"\"\n center_of_mass = self.center_of_mass()\n enclosing_box = minimum_enclosing_box(coordinates=self.atom_coordinate.T)\n shift = np.subtract(np.divide(enclosing_box, 2), center_of_mass)\n\n transformed_structure = self.rigid_transform(\n translation=shift, rotation_matrix=np.eye(shift.size)\n )\n\n return transformed_structure, shift\n\n def _coordinate_to_position(\n self,\n shape: Tuple[int],\n sampling_rate: Tuple[float],\n origin: Tuple[float],\n ) -> (NDArray, Tuple[str], Tuple[int], float, Tuple[float]):\n \"\"\"\n Converts coordinates to positions.\n\n Parameters\n ----------\n shape : Tuple[int,]\n The desired shape of the output array.\n\n sampling_rate : float\n The sampling rate of the output array in unit of self.atom_coordinate.\n\n origin : Tuple[float,]\n The origin of the coordinate system.\n Returns\n -------\n Tuple[NDArray, List[str], Tuple[int, ], float, Tuple[float,]]\n Returns positions, atom_types, shape, sampling_rate, and origin.\n \"\"\"\n coordinates = self.atom_coordinate.copy()\n atom_types = self.element_symbol.copy()\n\n # positions are in x, y, z map is z, y, x\n coordinates = coordinates[:, ::-1]\n\n sampling_rate = 1 if sampling_rate is None else sampling_rate\n adjust_origin = origin is not None and shape is None\n origin = coordinates.min(axis=0) if origin is None else origin\n positions = (coordinates - origin) / sampling_rate\n positions = np.rint(positions).astype(int)\n\n if adjust_origin:\n left_shift = positions.min(axis=0)\n positions -= left_shift\n shape = positions.max(axis=0) + 1\n origin = origin + np.multiply(left_shift, sampling_rate)\n\n if shape is None:\n shape = 
positions.max(axis=0) + 1\n\n valid_positions = np.sum(\n np.logical_and(positions < shape, positions >= 0), axis=1\n )\n\n positions = positions[valid_positions == positions.shape[1], :]\n atom_types = atom_types[valid_positions == positions.shape[1]]\n\n self.details[\"nAtoms_outOfBound\"] = 0\n if positions.shape[0] != coordinates.shape[0]:\n out_of_bounds = coordinates.shape[0] - positions.shape[0]\n print(f\"{out_of_bounds}/{coordinates.shape[0]} atoms were out of bounds.\")\n self.details[\"nAtoms_outOfBound\"] = out_of_bounds\n\n return positions, atom_types, shape, sampling_rate, origin\n\n def _position_to_vdw_sphere(\n self,\n positions: Tuple[float],\n atoms: Tuple[str],\n sampling_rate: Tuple[float],\n volume: NDArray,\n ) -> None:\n \"\"\"\n Updates a volume with van der Waals spheres.\n\n Parameters\n ----------\n positions : Tuple[float, float, float]\n The positions of the atoms.\n\n atoms : Tuple[str]\n The types of the atoms.\n\n sampling_rate : float\n The desired sampling rate in unit of self.atom_coordinate of the\n output array.\n\n volume : NDArray\n The volume to update.\n \"\"\"\n index_dict, vdw_rad, shape = {}, {}, volume.shape\n for atom_index, atom_position in enumerate(positions):\n atom_type = atoms[atom_index]\n if atom_type not in index_dict.keys():\n atom_vdwr = np.ceil(\n np.divide(self._elements[atom_type].vdwr, (sampling_rate * 100))\n ).astype(int)\n\n vdw_rad[atom_type] = atom_vdwr\n atom_slice = tuple(slice(-k, k + 1) for k in atom_vdwr)\n distances = np.linalg.norm(\n np.divide(\n np.mgrid[atom_slice],\n atom_vdwr.reshape((-1,) + (1,) * volume.ndim),\n ),\n axis=0,\n )\n index_dict[atom_type] = (distances <= 1).astype(volume.dtype)\n\n footprint = index_dict[atom_type]\n start = np.maximum(np.subtract(atom_position, vdw_rad[atom_type]), 0)\n stop = np.minimum(np.add(atom_position, vdw_rad[atom_type]) + 1, shape)\n volume_slice = tuple(slice(*coord) for coord in zip(start, stop))\n\n start_index = np.maximum(-np.subtract(atom_position, vdw_rad[atom_type]), 0)\n stop_index = np.add(\n footprint.shape,\n np.minimum(\n np.subtract(shape, np.add(atom_position, vdw_rad[atom_type]) + 1), 0\n ),\n )\n index_slice = tuple(slice(*coord) for coord in zip(start_index, stop_index))\n volume[volume_slice] += footprint[index_slice]\n\n def _position_to_scattering_factors(\n self,\n positions: NDArray,\n atoms: NDArray,\n sampling_rate: NDArray,\n volume: NDArray,\n lowpass_filter: bool = True,\n downsampling_factor: float = 1.35,\n source: str = \"peng1995\",\n ) -> None:\n \"\"\"\n Updates a volume with scattering factors.\n\n Parameters\n ----------\n positions : NDArray\n The positions of the atoms.\n atoms : NDArray\n Element symbols.\n sampling_rate : float\n Sampling rate that was used to convert coordinates to positions.\n volume : NDArray\n The volume to update.\n lowpass_filter : NDArray\n Whether the scattering factors hsould be lowpass filtered.\n downsampling_factor : NDArray\n Downsampling factor for scattering factor computation.\n source : str\n Which scattering factors to use\n\n Reference\n ---------\n https://github.com/I2PC/xmipp.\n \"\"\"\n scattering_profiles, shape = dict(), volume.shape\n for atom_index, point in enumerate(positions):\n if atoms[atom_index] not in scattering_profiles:\n spline = atom_profile(\n atom=atoms[atom_index],\n M=downsampling_factor,\n method=source,\n lfilter=lowpass_filter,\n )\n scattering_profiles.update({atoms[atom_index]: spline})\n\n atomic_radius = np.divide(\n self._elements[atoms[atom_index]].vdwr, 
sampling_rate * 100\n )\n starts = np.maximum(np.ceil(point - atomic_radius), 0).astype(int)\n stops = np.minimum(np.floor(point + atomic_radius), shape).astype(int)\n\n grid_index = np.meshgrid(\n *[range(start, stop) for start, stop in zip(starts, stops)]\n )\n distances = np.einsum(\n \"aijk->ijk\",\n np.array([(grid_index[i] - point[i]) ** 2 for i in range(len(point))]),\n dtype=np.float64,\n )\n distances = np.sqrt(distances)\n if not len(distances):\n grid_index, distances = point, 0\n np.add.at(\n volume,\n tuple(grid_index),\n scattering_profiles[atoms[atom_index]](distances),\n )\n\n def _get_atom_weights(\n self, atoms: Tuple[str] = None, weight_type: str = \"atomic_weight\"\n ) -> Tuple[float]:\n \"\"\"\n Returns weights of individual atoms according to a specified weight type.\n\n Parameters\n ----------\n atoms : Tuple of strings, optional\n The atoms to get the weights for. If None, weights for all atoms\n are used. Default is None.\n\n weight_type : str, optional\n The type of weights to return. This can either be 'atomic_weight',\n 'atomic_number', or 'van_der_waals_radius'. Default is 'atomic_weight'.\n\n Returns\n -------\n List[float]\n A list containing the weights of the atoms.\n \"\"\"\n atoms = self.element_symbol if atoms is None else atoms\n match weight_type:\n case \"atomic_weight\":\n weight = [self._elements[atom].atomic_weight for atom in atoms]\n case \"atomic_number\":\n weight = [self._elements[atom].atomic_number for atom in atoms]\n case _:\n raise NotImplementedError(\n \"weight_type can either be 'atomic_weight' or 'atomic_number'\"\n )\n return weight\n\n def to_volume(\n self,\n shape: Tuple[int] = None,\n sampling_rate: NDArray = None,\n origin: Tuple[float] = None,\n chain: str = None,\n weight_type: str = \"atomic_weight\",\n scattering_args: Dict = dict(),\n ) -> Tuple[NDArray, Tuple[int], NDArray]:\n \"\"\"\n Converts atom coordinates of shape [n x 3] x, y, z to a volume with\n index z, y, x.\n\n Parameters\n ----------\n shape : Tuple[int, ...], optional\n Desired shape of the output array. If shape is given its expected to be\n in z, y, x form.\n sampling_rate : float, optional\n Sampling rate of the output array in the unit of self.atom_coordinate\n origin : Tuple[float, ...], optional\n Origin of the coordinate system. If origin is given its expected to be\n in z, y, x form.\n chain : str, optional\n The chain identifier. If multiple chains should be selected they need\n to be a comma separated string, e.g. 'A,B,CE'. If chain None,\n all chains are returned. 
Default is None.\n weight_type : str, optional\n Which weight should be given to individual atoms.\n scattering_args : dict, optional\n Additional arguments for scattering factor computation.\n\n Returns\n -------\n Tuple[NDArray, Tuple[int], NDArray]\n The volume, its origin and the voxel size in Ångstrom.\n \"\"\"\n _weight_types = {\n \"atomic_weight\",\n \"atomic_number\",\n \"van_der_waals_radius\",\n \"scattering_factors\",\n \"lowpass_scattering_factors\",\n }\n _weight_string = \",\".join([f\"'{x}'\" for x in _weight_types])\n if weight_type not in _weight_types:\n raise NotImplementedError(f\"weight_type needs to be in {_weight_string}\")\n\n if sampling_rate is None:\n sampling_rate = np.ones(self.atom_coordinate.shape[1])\n sampling_rate = np.array(sampling_rate)\n if sampling_rate.size == 1:\n sampling_rate = np.repeat(sampling_rate, self.atom_coordinate.shape[1])\n elif sampling_rate.size != self.atom_coordinate.shape[1]:\n raise ValueError(\n \"sampling_rate should either be single value of array with\"\n f\"size {self.atom_coordinate.shape[1]}.\"\n )\n if \"source\" not in scattering_args:\n scattering_args[\"source\"] = \"peng1995\"\n\n temp = self.subset_by_chain(chain=chain)\n\n positions, atoms, shape, sampling_rate, origin = temp._coordinate_to_position(\n shape=shape, sampling_rate=sampling_rate, origin=origin\n )\n volume = np.zeros(shape, dtype=np.float32)\n if weight_type in (\"atomic_weight\", \"atomic_number\"):\n weights = temp._get_atom_weights(atoms=atoms, weight_type=weight_type)\n np.add.at(volume, tuple(positions.T), weights)\n elif weight_type == \"van_der_waals_radius\":\n self._position_to_vdw_sphere(positions, atoms, sampling_rate, volume)\n elif weight_type == \"scattering_factors\":\n self._position_to_scattering_factors(\n positions,\n atoms,\n sampling_rate,\n volume,\n lowpass_filter=False,\n **scattering_args,\n )\n elif weight_type == \"lowpass_scattering_factors\":\n self._position_to_scattering_factors(\n positions,\n atoms,\n sampling_rate,\n volume,\n lowpass_filter=True,\n **scattering_args,\n )\n\n self.details.update(temp.details)\n return volume, origin, sampling_rate\n\n @classmethod\n def compare_structures(\n cls,\n structure1: \"Structure\",\n structure2: \"Structure\",\n origin: NDArray = None,\n sampling_rate: float = None,\n weighted: bool = False,\n ) -> float:\n \"\"\"\n Compute root mean square deviation (RMSD) between two structures.\n\n Both structures need to have the same number of atoms. 
In practice, this means\n that *structure2* is a transformed version of *structure1*\n\n Parameters\n ----------\n structure1 : Structure\n Structure 1.\n\n structure2 : Structure\n Structure 2.\n\n origin : NDArray, optional\n Origin of the structure coordinate system.\n\n sampling_rate : float, optional\n Sampling rate if discretized on a grid in the unit of self.atom_coordinate.\n\n weighted : bool, optional\n Whether atoms should be weighted by their atomic weight.\n\n Returns\n -------\n float\n Root Mean Square Deviation (RMSD)\n \"\"\"\n if origin is None:\n origin = np.zeros(structure1.atom_coordinate.shape[1])\n\n coordinates1 = structure1.atom_coordinate\n coordinates2 = structure2.atom_coordinate\n atoms1, atoms2 = structure1.element_symbol, structure2.element_symbol\n if sampling_rate is not None:\n coordinates1 = np.rint((coordinates1 - origin) / sampling_rate).astype(int)\n coordinates2 = np.rint((coordinates2 - origin) / sampling_rate).astype(int)\n\n weights1 = np.array(structure1._get_atom_weights(atoms=atoms1))\n weights2 = np.array(structure2._get_atom_weights(atoms=atoms2))\n if not weighted:\n weights1 = np.ones_like(weights1)\n weights2 = np.ones_like(weights2)\n\n if not np.allclose(coordinates1.shape, coordinates2.shape):\n raise ValueError(\n \"Input structures need to have the same number of coordinates.\"\n )\n if not np.allclose(weights1.shape, weights2.shape):\n raise ValueError(\"Input structures need to have the same number of atoms.\")\n\n squared_diff = np.sum(np.square(coordinates1 - coordinates2), axis=1)\n weighted_quared_diff = squared_diff * ((weights1 + weights2) / 2)\n rmsd = np.sqrt(np.mean(weighted_quared_diff))\n\n return rmsd\n\n @classmethod\n def align_structures(\n cls,\n structure1: \"Structure\",\n structure2: \"Structure\",\n origin: NDArray = None,\n sampling_rate: float = None,\n weighted: bool = False,\n ) -> Tuple[\"Structure\", float]:\n \"\"\"\n Align the atom coordinates of structure2 to structure1 using\n the Kabsch algorithm.\n\n Both structures need to have the same number of atoms. 
In practice, this means\n that *structure2* is a subset of *structure1*\n\n Parameters\n ----------\n structure1 : Structure\n Structure 1.\n\n structure2 : Structure\n Structure 2.\n\n origin : NDArray, optional\n Origin of the structure coordinate system.\n\n sampling_rate : float, optional\n Voxel size if discretized on a grid.\n\n weighted : bool, optional\n Whether atoms should be weighted by their atomic weight.\n\n Returns\n -------\n Structure\n *structure2* aligned to *structure1*.\n float\n Root Mean Square Error (RMSE)\n \"\"\"\n if origin is None:\n origin = np.minimum(\n structure1.atom_coordinate.min(axis=0),\n structure2.atom_coordinate.min(axis=0),\n ).astype(int)\n\n initial_rmsd = cls.compare_structures(\n structure1=structure1,\n structure2=structure2,\n origin=origin,\n sampling_rate=sampling_rate,\n weighted=weighted,\n )\n\n reference = structure1.atom_coordinate.copy()\n query = structure2.atom_coordinate.copy()\n if sampling_rate is not None:\n reference, atoms1, shape, _, _ = structure1._coordinate_to_position(\n shape=None, sampling_rate=sampling_rate, origin=origin\n )\n query, atoms2, shape, _, _ = structure2._coordinate_to_position(\n shape=None, sampling_rate=sampling_rate, origin=origin\n )\n\n reference_mean = reference.mean(axis=0)\n query_mean = query.mean(axis=0)\n\n reference = reference - reference_mean\n query = query - query_mean\n\n corr = np.dot(query.T, reference)\n U, S, Vh = np.linalg.svd(corr)\n\n rotation = np.dot(Vh.T, U.T).T\n if np.linalg.det(rotation) < 0:\n Vh[2, :] *= -1\n rotation = np.dot(Vh.T, U.T).T\n\n translation = reference_mean - np.dot(query_mean, rotation)\n\n temp = structure1.copy()\n temp.atom_coordinate = reference + reference_mean\n ret = structure2.copy()\n ret.atom_coordinate = np.dot(query + query_mean, rotation) + translation\n\n final_rmsd = cls.compare_structures(\n structure1=temp,\n structure2=ret,\n origin=origin,\n sampling_rate=None,\n weighted=weighted,\n )\n\n print(f\"Initial RMSD: {initial_rmsd:.5f} - Final RMSD: {final_rmsd:.5f}\")\n\n return ret, final_rmsd" }, { "identifier": "euler_to_rotationmatrix", "path": "tme/matching_utils.py", "snippet": "def euler_to_rotationmatrix(angles: Tuple[float]) -> NDArray:\n \"\"\"\n Convert Euler angles to a rotation matrix.\n\n Parameters\n ----------\n angles : tuple\n A tuple representing the Euler angles in degrees.\n\n Returns\n -------\n NDArray\n The generated rotation matrix.\n \"\"\"\n if len(angles) == 1:\n angles = (angles, 0, 0)\n rotation_matrix = (\n Rotation.from_euler(\"zyx\", angles, degrees=True).as_matrix().astype(np.float32)\n )\n return rotation_matrix" }, { "identifier": "minimum_enclosing_box", "path": "tme/matching_utils.py", "snippet": "def minimum_enclosing_box(\n coordinates: NDArray,\n margin: NDArray = None,\n use_geometric_center: bool = False,\n) -> Tuple[int]:\n \"\"\"\n Computes the minimal enclosing box around coordinates with margin.\n\n Parameters\n ----------\n coordinates : NDArray\n Coordinates of which the enclosing box should be computed. The shape\n of this array should be [d, n] with d dimensions and n coordinates.\n margin : NDArray, optional\n Box margin. Defaults to None.\n use_geometric_center : bool, optional\n Whether the box should accommodate the geometric or the coordinate\n center. 
Defaults to False.\n\n Returns\n -------\n tuple\n Integers corresponding to the minimum enclosing box shape.\n \"\"\"\n point_cloud = np.asarray(coordinates)\n dim = point_cloud.shape[0]\n point_cloud = point_cloud - point_cloud.min(axis=1)[:, None]\n\n margin = np.zeros(dim) if margin is None else margin\n margin = np.asarray(margin).astype(int)\n\n norm_cloud = point_cloud - point_cloud.mean(axis=1)[:, None]\n # Adding one avoids clipping during scipy.ndimage.affine_transform\n shape = np.repeat(\n np.ceil(2 * np.linalg.norm(norm_cloud, axis=0).max()) + 1, dim\n ).astype(int)\n if use_geometric_center:\n hull = ConvexHull(point_cloud.T)\n distance, _ = max_euclidean_distance(point_cloud[:, hull.vertices].T)\n distance += np.linalg.norm(np.ones(dim))\n shape = np.repeat(np.rint(distance).astype(int), dim)\n\n return shape" } ]
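The last context snippet above, `minimum_enclosing_box`, expects coordinates with dimensions first ([d, n]) and returns one cubic edge length per dimension, padded so a rotated copy of the point cloud still fits. A minimal usage sketch (the random point cloud is purely illustrative; only the import shown in this record is assumed):

import numpy as np
from tme.matching_utils import minimum_enclosing_box

# 100 illustrative points in 3D; the function wants shape [d, n],
# so the usual [n, d] coordinate array is transposed.
coordinates = np.random.rand(100, 3) * 50.0

# Cubic box shape (one integer edge length per dimension) that still
# encloses the cloud after an arbitrary rotation about its mean.
box_shape = minimum_enclosing_box(coordinates=coordinates.T)

# Optional per-dimension margin, and the convex-hull based sizing used
# when the geometric center should be accommodated instead.
box_with_margin = minimum_enclosing_box(coordinates=coordinates.T, margin=(2, 2, 2))
box_geometric = minimum_enclosing_box(
    coordinates=coordinates.T, use_geometric_center=True
)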
from tempfile import mkstemp
from os import remove
from tme import Structure
from tme.matching_utils import euler_to_rotationmatrix, minimum_enclosing_box
import pytest
import numpy as np
13,226
def test__getitem__(self): ret_single_index = self.structure[1] ret = self.structure[[1]] self.compare_structures(ret_single_index, ret) ret = self.structure[self.structure.record_type == "ATOM"] assert np.all(ret.record_type == "ATOM") ret = self.structure[self.structure.element_symbol == "C"] assert np.all(ret.element_symbol == "C") def test__repr__(self): unique_chains = "-".join( [ ",".join([str(x) for x in entity]) for entity in self.structure.details["unique_chains"] ] ) min_atom = np.min(self.structure.atom_serial_number) max_atom = np.max(self.structure.atom_serial_number) n_atom = self.structure.atom_serial_number.size min_residue = np.min(self.structure.residue_sequence_number) max_residue = np.max(self.structure.residue_sequence_number) n_residue = self.structure.residue_sequence_number.size repr_str = ( f"Structure object at {id(self.structure)}\n" f"Unique Chains: {unique_chains}, " f"Atom Range: {min_atom}-{max_atom} [N = {n_atom}], " f"Residue Range: {min_residue}-{max_residue} [N = {n_residue}]" ) assert repr_str == self.structure.__repr__() @pytest.mark.parametrize( "path", [ ("./tme/tests/data/Structures/5khe.cif"), ("./tme/tests/data/Structures/5khe.pdb"), ], ) def test_fromfile(self, path): _ = Structure.from_file(path) def test_fromfile_error(self): with pytest.raises(NotImplementedError): _ = Structure.from_file("madeup.extension") @pytest.mark.parametrize("file_format", [("cif"), ("pdb")]) def test_to_file(self, file_format): _, path = mkstemp() path = f"{path}.{file_format}" self.structure.to_file(path) read = self.structure.from_file(path) comparison = self.structure.copy() self.compare_structures(comparison, read, exclude_attributes=["details"]) def test_to_file_error(self): _, path = mkstemp() path = f"{path}.RAISERROR" with pytest.raises(NotImplementedError): self.structure.to_file(path) def test_subset_by_chain(self): chain = "A" ret = self.structure.subset_by_chain(chain=chain) assert np.all(ret.chain_identifier == chain) def test_subset_by_chain_range(self): chain, start, stop = "A", 0, 20 ret = self.structure.subset_by_range(chain=chain, start=start, stop=stop) assert np.all(ret.chain_identifier == chain) assert np.all( np.logical_and( ret.residue_sequence_number >= start, ret.residue_sequence_number <= stop, ) ) def test_center_of_mass(self): center_of_mass = self.structure.center_of_mass() assert center_of_mass.shape[0] == self.structure.atom_coordinate.shape[1] assert np.allclose(center_of_mass, [-0.89391639, 29.94908928, -2.64736741]) def test_centered(self): ret, translation = self.structure.centered() box = minimum_enclosing_box(coordinates=self.structure.atom_coordinate.T) assert np.allclose(ret.center_of_mass(), np.divide(box, 2), atol=1) def test__get_atom_weights_error(self): with pytest.raises(NotImplementedError): self.structure._get_atom_weights( self.structure.atom_name, weight_type="RAISEERROR" ) def test_compare_structures(self): rmsd = Structure.compare_structures(self.structure, self.structure) assert rmsd == 0 rmsd = Structure.compare_structures( self.structure, self.structure, weighted=True ) assert rmsd == 0 translation = (3, 0, 0) structure_transform = self.structure.rigid_transform( translation=translation, rotation_matrix=np.eye(self.structure.atom_coordinate.shape[1]), ) rmsd = Structure.compare_structures(self.structure, structure_transform) assert np.allclose(rmsd, np.linalg.norm(translation)) def test_comopare_structures_error(self): ret = self.structure[[1, 2, 3, 4, 5]] with pytest.raises(ValueError): 
Structure.compare_structures(self.structure, ret) def test_align_structures(self):
STRUCTURE_ATTRIBUTES = [ "record_type", "atom_serial_number", "atom_name", "atom_coordinate", "alternate_location_indicator", "residue_name", "chain_identifier", "residue_sequence_number", "code_for_residue_insertion", "occupancy", "temperature_factor", "segment_identifier", "element_symbol", "charge", "details", ] class TestStructure: def setup_method(self): self.structure = Structure.from_file("./tme/tests/data/Structures/5khe.cif") _, self.path = mkstemp() def teardown_method(self): del self.structure remove(self.path) def compare_structures(self, structure1, structure2, exclude_attributes=[]): for attribute in STRUCTURE_ATTRIBUTES: if attribute in exclude_attributes: continue value = getattr(structure1, attribute) value_comparison = getattr(structure2, attribute) if type(value) == np.ndarray: assert np.all(value_comparison == value) else: assert value == value_comparison def test_initialization(self): structure = Structure( record_type=self.structure.record_type, atom_serial_number=self.structure.atom_serial_number, atom_name=self.structure.atom_name, atom_coordinate=self.structure.atom_coordinate, alternate_location_indicator=self.structure.alternate_location_indicator, residue_name=self.structure.residue_name, chain_identifier=self.structure.chain_identifier, residue_sequence_number=self.structure.residue_sequence_number, code_for_residue_insertion=self.structure.code_for_residue_insertion, occupancy=self.structure.occupancy, temperature_factor=self.structure.temperature_factor, segment_identifier=self.structure.segment_identifier, element_symbol=self.structure.element_symbol, charge=self.structure.charge, details=self.structure.details, ) for attribute in STRUCTURE_ATTRIBUTES: value = getattr(self.structure, attribute) value_comparison = getattr(structure, attribute) if type(value) == np.ndarray: assert np.all(value_comparison == value) else: assert value == value_comparison @pytest.mark.parametrize( "modified_attribute", [ ("record_type"), ("atom_serial_number"), ("atom_name"), ("atom_coordinate"), ("alternate_location_indicator"), ("residue_name"), ("chain_identifier"), ("residue_sequence_number"), ("code_for_residue_insertion"), ("occupancy"), ("temperature_factor"), ("segment_identifier"), ("element_symbol"), ], ) def test_initialization_errors(self, modified_attribute): kwargs = { attribute: getattr(self.structure, attribute) for attribute in STRUCTURE_ATTRIBUTES if attribute != modified_attribute } kwargs[modified_attribute] = getattr(self.structure, modified_attribute)[:1] with pytest.raises(ValueError): Structure(**kwargs) def test__getitem__(self): ret_single_index = self.structure[1] ret = self.structure[[1]] self.compare_structures(ret_single_index, ret) ret = self.structure[self.structure.record_type == "ATOM"] assert np.all(ret.record_type == "ATOM") ret = self.structure[self.structure.element_symbol == "C"] assert np.all(ret.element_symbol == "C") def test__repr__(self): unique_chains = "-".join( [ ",".join([str(x) for x in entity]) for entity in self.structure.details["unique_chains"] ] ) min_atom = np.min(self.structure.atom_serial_number) max_atom = np.max(self.structure.atom_serial_number) n_atom = self.structure.atom_serial_number.size min_residue = np.min(self.structure.residue_sequence_number) max_residue = np.max(self.structure.residue_sequence_number) n_residue = self.structure.residue_sequence_number.size repr_str = ( f"Structure object at {id(self.structure)}\n" f"Unique Chains: {unique_chains}, " f"Atom Range: {min_atom}-{max_atom} [N = {n_atom}], " 
f"Residue Range: {min_residue}-{max_residue} [N = {n_residue}]" ) assert repr_str == self.structure.__repr__() @pytest.mark.parametrize( "path", [ ("./tme/tests/data/Structures/5khe.cif"), ("./tme/tests/data/Structures/5khe.pdb"), ], ) def test_fromfile(self, path): _ = Structure.from_file(path) def test_fromfile_error(self): with pytest.raises(NotImplementedError): _ = Structure.from_file("madeup.extension") @pytest.mark.parametrize("file_format", [("cif"), ("pdb")]) def test_to_file(self, file_format): _, path = mkstemp() path = f"{path}.{file_format}" self.structure.to_file(path) read = self.structure.from_file(path) comparison = self.structure.copy() self.compare_structures(comparison, read, exclude_attributes=["details"]) def test_to_file_error(self): _, path = mkstemp() path = f"{path}.RAISERROR" with pytest.raises(NotImplementedError): self.structure.to_file(path) def test_subset_by_chain(self): chain = "A" ret = self.structure.subset_by_chain(chain=chain) assert np.all(ret.chain_identifier == chain) def test_subset_by_chain_range(self): chain, start, stop = "A", 0, 20 ret = self.structure.subset_by_range(chain=chain, start=start, stop=stop) assert np.all(ret.chain_identifier == chain) assert np.all( np.logical_and( ret.residue_sequence_number >= start, ret.residue_sequence_number <= stop, ) ) def test_center_of_mass(self): center_of_mass = self.structure.center_of_mass() assert center_of_mass.shape[0] == self.structure.atom_coordinate.shape[1] assert np.allclose(center_of_mass, [-0.89391639, 29.94908928, -2.64736741]) def test_centered(self): ret, translation = self.structure.centered() box = minimum_enclosing_box(coordinates=self.structure.atom_coordinate.T) assert np.allclose(ret.center_of_mass(), np.divide(box, 2), atol=1) def test__get_atom_weights_error(self): with pytest.raises(NotImplementedError): self.structure._get_atom_weights( self.structure.atom_name, weight_type="RAISEERROR" ) def test_compare_structures(self): rmsd = Structure.compare_structures(self.structure, self.structure) assert rmsd == 0 rmsd = Structure.compare_structures( self.structure, self.structure, weighted=True ) assert rmsd == 0 translation = (3, 0, 0) structure_transform = self.structure.rigid_transform( translation=translation, rotation_matrix=np.eye(self.structure.atom_coordinate.shape[1]), ) rmsd = Structure.compare_structures(self.structure, structure_transform) assert np.allclose(rmsd, np.linalg.norm(translation)) def test_comopare_structures_error(self): ret = self.structure[[1, 2, 3, 4, 5]] with pytest.raises(ValueError): Structure.compare_structures(self.structure, ret) def test_align_structures(self):
rotation_matrix = euler_to_rotationmatrix((20, -10, 45))
1
2023-10-20 13:46:01+00:00
16k
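For orientation between records: the `test_align_structures` case above exercises the Kabsch step implemented in this record's `Structure.align_structures`. A self-contained NumPy sketch of that step, written directly from the snippet above (plain [n, 3] coordinate arrays instead of Structure objects; not an API of the package):

import numpy as np

def kabsch_align(reference: np.ndarray, query: np.ndarray) -> np.ndarray:
    """Rigidly align `query` onto `reference` (both [n, 3]), mirroring the
    SVD-based rotation and reflection guard in Structure.align_structures."""
    reference_mean = reference.mean(axis=0)
    query_mean = query.mean(axis=0)

    # Covariance of the mean-centered point sets and its SVD.
    corr = (query - query_mean).T @ (reference - reference_mean)
    U, _, Vh = np.linalg.svd(corr)

    rotation = (Vh.T @ U.T).T
    if np.linalg.det(rotation) < 0:
        # Improper rotation (reflection): flip the last right-singular vector.
        Vh[2, :] *= -1
        rotation = (Vh.T @ U.T).T

    translation = reference_mean - query_mean @ rotation
    aligned = query @ rotation + translation

    rmsd = np.sqrt(np.mean(np.sum((aligned - reference) ** 2, axis=1)))
    print(f"Final RMSD: {rmsd:.5f}")
    return aligned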
tonnetonne814/MB-iSTFT-BERT-VITS2-44100-Ja
train_ms.py
[ { "identifier": "TextAudioSpeakerLoader", "path": "data_utils.py", "snippet": "class TextAudioSpeakerLoader(torch.utils.data.Dataset):\n \"\"\"\n 1) loads audio, speaker_id, text pairs\n 2) normalizes text and converts them to sequences of integers\n 3) computes spectrograms from audio files.\n \"\"\"\n\n def __init__(self, audiopaths_sid_text, hparams):\n self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text)\n self.max_wav_value = hparams.max_wav_value\n self.sampling_rate = hparams.sampling_rate\n self.filter_length = hparams.filter_length\n self.hop_length = hparams.hop_length\n self.win_length = hparams.win_length\n self.sampling_rate = hparams.sampling_rate\n self.spk_map = hparams.spk2id\n self.hparams = hparams\n\n self.use_mel_spec_posterior = getattr(\n hparams, \"use_mel_posterior_encoder\", False\n )\n if self.use_mel_spec_posterior:\n self.n_mel_channels = getattr(hparams, \"n_mel_channels\", 80)\n\n self.cleaned_text = getattr(hparams, \"cleaned_text\", False)\n\n self.add_blank = hparams.add_blank\n self.min_text_len = getattr(hparams, \"min_text_len\", 1)\n self.max_text_len = getattr(hparams, \"max_text_len\", 300)\n\n random.seed(1234)\n random.shuffle(self.audiopaths_sid_text)\n self._filter()\n\n def _filter(self):\n \"\"\"\n Filter text & store spec lengths\n \"\"\"\n # Store spectrogram lengths for Bucketing\n # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)\n # spec_length = wav_length // hop_length\n\n audiopaths_sid_text_new = []\n lengths = []\n skipped = 0\n logger.info(\"Init dataset...\")\n for _id, spk, language, text, phones, tone, word2ph in tqdm(\n self.audiopaths_sid_text\n ):\n audiopath = f\"{_id}\"\n if self.min_text_len <= len(phones) and len(phones) <= self.max_text_len:\n phones = phones.split(\" \")\n tone = [int(i) for i in tone.split(\" \")]\n word2ph = [int(i) for i in word2ph.split(\" \")]\n audiopaths_sid_text_new.append(\n [audiopath, spk, language, text, phones, tone, word2ph]\n )\n lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length))\n else:\n skipped += 1\n logger.info(\n \"skipped: \"\n + str(skipped)\n + \", total: \"\n + str(len(self.audiopaths_sid_text))\n )\n self.audiopaths_sid_text = audiopaths_sid_text_new\n self.lengths = lengths\n\n def get_audio_text_speaker_pair(self, audiopath_sid_text):\n # separate filename, speaker_id and text\n audiopath, sid, language, text, phones, tone, word2ph = audiopath_sid_text\n\n bert, ja_bert, phones, tone, language = self.get_text(\n text, word2ph, phones, tone, language, audiopath\n )\n\n spec, wav = self.get_audio(audiopath)\n sid = torch.LongTensor([int(self.spk_map[sid])])\n return (phones, spec, wav, sid, tone, language, bert, ja_bert)\n\n def get_audio(self, filename):\n audio, sampling_rate = load_wav_to_torch(filename)\n if sampling_rate != self.sampling_rate:\n raise ValueError(\n \"{} {} SR doesn't match target {} SR\".format(\n filename, sampling_rate, self.sampling_rate\n )\n )\n audio_norm = audio / self.max_wav_value\n audio_norm = audio_norm.unsqueeze(0)\n spec_filename = filename.replace(\".wav\", \".spec.pt\")\n if self.use_mel_spec_posterior:\n spec_filename = spec_filename.replace(\".spec.pt\", \".mel.pt\")\n try:\n spec = torch.load(spec_filename)\n except:\n if self.use_mel_spec_posterior:\n spec = mel_spectrogram_torch(\n audio_norm,\n self.filter_length,\n self.n_mel_channels,\n self.sampling_rate,\n self.hop_length,\n self.win_length,\n self.hparams.mel_fmin,\n self.hparams.mel_fmax,\n center=False,\n )\n 
else:\n spec = spectrogram_torch(\n audio_norm,\n self.filter_length,\n self.sampling_rate,\n self.hop_length,\n self.win_length,\n center=False,\n )\n spec = torch.squeeze(spec, 0)\n torch.save(spec, spec_filename)\n return spec, audio_norm\n\n def get_text(self, text, word2ph, phone, tone, language_str, wav_path):\n phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)\n if self.add_blank:\n phone = commons.intersperse(phone, 0)\n tone = commons.intersperse(tone, 0)\n language = commons.intersperse(language, 0)\n for i in range(len(word2ph)):\n word2ph[i] = word2ph[i] * 2\n word2ph[0] += 1\n bert_path = wav_path.replace(\".wav\", \".bert.pt\")\n try:\n bert = torch.load(bert_path)\n assert bert.shape[-1] == len(phone)\n except:\n bert = get_bert(text, word2ph, language_str, device=\"cuda\")\n torch.save(bert, bert_path)\n assert bert.shape[-1] == len(phone), phone\n\n if language_str == \"ZH\":\n bert = bert\n ja_bert = torch.zeros(768, len(phone))\n elif language_str == \"JP\":\n ja_bert = bert\n bert = torch.zeros(1024, len(phone))\n else:\n bert = torch.zeros(1024, len(phone))\n ja_bert = torch.zeros(768, len(phone))\n assert bert.shape[-1] == len(phone), (\n bert.shape,\n len(phone),\n sum(word2ph),\n p1,\n p2,\n t1,\n t2,\n pold,\n pold2,\n word2ph,\n text,\n w2pho,\n )\n phone = torch.LongTensor(phone)\n tone = torch.LongTensor(tone)\n language = torch.LongTensor(language)\n return bert, ja_bert, phone, tone, language\n\n def get_sid(self, sid):\n sid = torch.LongTensor([int(sid)])\n return sid\n\n def __getitem__(self, index):\n return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index])\n\n def __len__(self):\n return len(self.audiopaths_sid_text)" }, { "identifier": "TextAudioSpeakerCollate", "path": "data_utils.py", "snippet": "class TextAudioSpeakerCollate:\n \"\"\"Zero-pads model inputs and targets\"\"\"\n\n def __init__(self, return_ids=False):\n self.return_ids = return_ids\n\n def __call__(self, batch):\n \"\"\"Collate's training batch from normalized text, audio and speaker identities\n PARAMS\n ------\n batch: [text_normalized, spec_normalized, wav_normalized, sid]\n \"\"\"\n # Right zero-pad all one-hot text sequences to max input length\n _, ids_sorted_decreasing = torch.sort(\n torch.LongTensor([x[1].size(1) for x in batch]), dim=0, descending=True\n )\n\n max_text_len = max([len(x[0]) for x in batch])\n max_spec_len = max([x[1].size(1) for x in batch])\n max_wav_len = max([x[2].size(1) for x in batch])\n\n text_lengths = torch.LongTensor(len(batch))\n spec_lengths = torch.LongTensor(len(batch))\n wav_lengths = torch.LongTensor(len(batch))\n sid = torch.LongTensor(len(batch))\n\n text_padded = torch.LongTensor(len(batch), max_text_len)\n tone_padded = torch.LongTensor(len(batch), max_text_len)\n language_padded = torch.LongTensor(len(batch), max_text_len)\n bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)\n ja_bert_padded = torch.FloatTensor(len(batch), 768, max_text_len)\n\n spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)\n wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)\n text_padded.zero_()\n tone_padded.zero_()\n language_padded.zero_()\n spec_padded.zero_()\n wav_padded.zero_()\n bert_padded.zero_()\n ja_bert_padded.zero_()\n for i in range(len(ids_sorted_decreasing)):\n row = batch[ids_sorted_decreasing[i]]\n\n text = row[0]\n text_padded[i, : text.size(0)] = text\n text_lengths[i] = text.size(0)\n\n spec = row[1]\n spec_padded[i, :, : spec.size(1)] = spec\n 
spec_lengths[i] = spec.size(1)\n\n wav = row[2]\n wav_padded[i, :, : wav.size(1)] = wav\n wav_lengths[i] = wav.size(1)\n\n sid[i] = row[3]\n\n tone = row[4]\n tone_padded[i, : tone.size(0)] = tone\n\n language = row[5]\n language_padded[i, : language.size(0)] = language\n\n bert = row[6]\n bert_padded[i, :, : bert.size(1)] = bert\n\n ja_bert = row[7]\n ja_bert_padded[i, :, : ja_bert.size(1)] = ja_bert\n\n return (\n text_padded,\n text_lengths,\n spec_padded,\n spec_lengths,\n wav_padded,\n wav_lengths,\n sid,\n tone_padded,\n language_padded,\n bert_padded,\n ja_bert_padded,\n )" }, { "identifier": "DistributedBucketSampler", "path": "data_utils.py", "snippet": "class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):\n \"\"\"\n Maintain similar input lengths in a batch.\n Length groups are specified by boundaries.\n Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <=b2} or {x | b2 < length(x) <= b3}.\n\n It removes samples which are not included in the boundaries.\n Ex) boundaries = [b1, b2, b3] -> any x s.t. length(x) <= b1 or length(x) > b3 are discarded.\n \"\"\"\n\n def __init__(\n self,\n dataset,\n batch_size,\n boundaries,\n num_replicas=None,\n rank=None,\n shuffle=True,\n ):\n super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)\n self.lengths = dataset.lengths\n self.batch_size = batch_size\n self.boundaries = boundaries\n\n self.buckets, self.num_samples_per_bucket = self._create_buckets()\n self.total_size = sum(self.num_samples_per_bucket)\n self.num_samples = self.total_size // self.num_replicas\n\n def _create_buckets(self):\n buckets = [[] for _ in range(len(self.boundaries) - 1)]\n for i in range(len(self.lengths)):\n length = self.lengths[i]\n idx_bucket = self._bisect(length)\n if idx_bucket != -1:\n buckets[idx_bucket].append(i)\n\n try:\n for i in range(len(buckets) - 1, 0, -1):\n if len(buckets[i]) == 0:\n buckets.pop(i)\n self.boundaries.pop(i + 1)\n assert all(len(bucket) > 0 for bucket in buckets)\n # When one bucket is not traversed\n except Exception as e:\n print(\"Bucket warning \", e)\n for i in range(len(buckets) - 1, -1, -1):\n if len(buckets[i]) == 0:\n buckets.pop(i)\n self.boundaries.pop(i + 1)\n\n num_samples_per_bucket = []\n for i in range(len(buckets)):\n len_bucket = len(buckets[i])\n total_batch_size = self.num_replicas * self.batch_size\n rem = (\n total_batch_size - (len_bucket % total_batch_size)\n ) % total_batch_size\n num_samples_per_bucket.append(len_bucket + rem)\n return buckets, num_samples_per_bucket\n\n def __iter__(self):\n # deterministically shuffle based on epoch\n g = torch.Generator()\n g.manual_seed(self.epoch)\n\n indices = []\n if self.shuffle:\n for bucket in self.buckets:\n indices.append(torch.randperm(len(bucket), generator=g).tolist())\n else:\n for bucket in self.buckets:\n indices.append(list(range(len(bucket))))\n\n batches = []\n for i in range(len(self.buckets)):\n bucket = self.buckets[i]\n len_bucket = len(bucket)\n if len_bucket == 0:\n continue\n ids_bucket = indices[i]\n num_samples_bucket = self.num_samples_per_bucket[i]\n\n # add extra samples to make it evenly divisible\n rem = num_samples_bucket - len_bucket\n ids_bucket = (\n ids_bucket\n + ids_bucket * (rem // len_bucket)\n + ids_bucket[: (rem % len_bucket)]\n )\n\n # subsample\n ids_bucket = ids_bucket[self.rank :: self.num_replicas]\n\n # batching\n for j in range(len(ids_bucket) // self.batch_size):\n batch = [\n bucket[idx]\n for idx in ids_bucket[\n j * 
self.batch_size : (j + 1) * self.batch_size\n ]\n ]\n batches.append(batch)\n\n if self.shuffle:\n batch_ids = torch.randperm(len(batches), generator=g).tolist()\n batches = [batches[i] for i in batch_ids]\n self.batches = batches\n\n assert len(self.batches) * self.batch_size == self.num_samples\n return iter(self.batches)\n\n def _bisect(self, x, lo=0, hi=None):\n if hi is None:\n hi = len(self.boundaries) - 1\n\n if hi > lo:\n mid = (hi + lo) // 2\n if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]:\n return mid\n elif x <= self.boundaries[mid]:\n return self._bisect(x, lo, mid)\n else:\n return self._bisect(x, mid + 1, hi)\n else:\n return -1\n\n def __len__(self):\n return self.num_samples // self.batch_size" }, { "identifier": "SynthesizerTrn", "path": "models.py", "snippet": "class SynthesizerTrn(nn.Module):\n \"\"\"\n Synthesizer for Training\n \"\"\"\n\n def __init__(\n self,\n n_vocab,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n n_speakers=256,\n gin_channels=256,\n use_sdp=True,\n n_flow_layer=4,\n n_layers_trans_flow=6,\n flow_share_parameter=False,\n use_transformer_flow=True,\n subbands=8, # add\n gen_istft_n_fft=16, # add\n gen_istft_hop_size=4, # add\n **kwargs\n ):\n super().__init__()\n self.n_vocab = n_vocab\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.subbands = subbands\n self.gen_istft_n_fft = gen_istft_n_fft\n self.gen_istft_hop_size = gen_istft_hop_size\n self.n_speakers = n_speakers\n self.gin_channels = gin_channels\n self.n_layers_trans_flow = n_layers_trans_flow\n self.use_spk_conditioned_encoder = kwargs.get(\n \"use_spk_conditioned_encoder\", True\n )\n self.use_sdp = use_sdp\n self.use_noise_scaled_mas = kwargs.get(\"use_noise_scaled_mas\", False)\n self.mas_noise_scale_initial = kwargs.get(\"mas_noise_scale_initial\", 0.01)\n self.noise_scale_delta = kwargs.get(\"noise_scale_delta\", 2e-6)\n self.current_mas_noise_scale = self.mas_noise_scale_initial\n if self.use_spk_conditioned_encoder and gin_channels > 0:\n self.enc_gin_channels = gin_channels\n self.enc_p = TextEncoder(\n n_vocab,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n gin_channels=self.enc_gin_channels,\n )\n # self.dec = Generator(\n # inter_channels,\n # resblock,\n # resblock_kernel_sizes,\n # resblock_dilation_sizes,\n # upsample_rates,\n # upsample_initial_channel,\n # upsample_kernel_sizes,\n # gin_channels=gin_channels,\n # )\n self.dec = Multistream_iSTFT_Generator(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gen_istft_n_fft,\n gen_istft_hop_size, \n subbands,\n gin_channels=gin_channels,\n )\n self.enc_q = PosteriorEncoder(\n 
spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n if use_transformer_flow:\n self.flow = TransformerCouplingBlock(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers_trans_flow,\n 5,\n p_dropout,\n n_flow_layer,\n gin_channels=gin_channels,\n share_parameter=flow_share_parameter,\n )\n else:\n self.flow = ResidualCouplingBlock(\n inter_channels,\n hidden_channels,\n 5,\n 1,\n n_flow_layer,\n gin_channels=gin_channels,\n )\n self.sdp = StochasticDurationPredictor(\n hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels\n )\n self.dp = DurationPredictor(\n hidden_channels, 256, 3, 0.5, gin_channels=gin_channels\n )\n\n if n_speakers > 1:\n self.emb_g = nn.Embedding(n_speakers, gin_channels)\n else:\n self.ref_enc = ReferenceEncoder(spec_channels, gin_channels)\n\n def forward(self, x, x_lengths, y, y_lengths, sid, tone, language, bert, ja_bert):\n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = self.ref_enc(y.transpose(1, 2)).unsqueeze(-1)\n x, m_p, logs_p, x_mask = self.enc_p(\n x, x_lengths, tone, language, bert, ja_bert, g=g\n )\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n\n with torch.no_grad():\n # negative cross-entropy\n s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t]\n neg_cent1 = torch.sum(\n -0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True\n ) # [b, 1, t_s]\n neg_cent2 = torch.matmul(\n -0.5 * (z_p**2).transpose(1, 2), s_p_sq_r\n ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent3 = torch.matmul(\n z_p.transpose(1, 2), (m_p * s_p_sq_r)\n ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent4 = torch.sum(\n -0.5 * (m_p**2) * s_p_sq_r, [1], keepdim=True\n ) # [b, 1, t_s]\n neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4\n if self.use_noise_scaled_mas:\n epsilon = (\n torch.std(neg_cent)\n * torch.randn_like(neg_cent)\n * self.current_mas_noise_scale\n )\n neg_cent = neg_cent + epsilon\n\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = (\n monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1))\n .unsqueeze(1)\n .detach()\n )\n\n w = attn.sum(2)\n\n l_length_sdp = self.sdp(x, x_mask, w, g=g)\n l_length_sdp = l_length_sdp / torch.sum(x_mask)\n\n logw_ = torch.log(w + 1e-6) * x_mask\n logw = self.dp(x, x_mask, g=g)\n l_length_dp = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(\n x_mask\n ) # for averaging\n\n l_length = l_length_dp + l_length_sdp\n\n # expand prior\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)\n\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n o = self.dec(z_slice, g=g)\n return (\n o,\n l_length,\n attn,\n ids_slice,\n x_mask,\n y_mask,\n (z, z_p, m_p, logs_p, m_q, logs_q),\n (x, logw, logw_),\n )\n\n def infer(\n self,\n x,\n x_lengths,\n sid,\n tone,\n language,\n bert,\n ja_bert,\n noise_scale=0.667,\n length_scale=1,\n noise_scale_w=0.8,\n max_len=None,\n sdp_ratio=0,\n y=None,\n ):\n # x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert)\n # g = self.gst(y)\n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = self.ref_enc(y.transpose(1, 2)).unsqueeze(-1)\n x, m_p, logs_p, x_mask = self.enc_p(\n x, x_lengths, tone, language, bert, ja_bert, g=g\n )\n logw = self.sdp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) * (\n 
sdp_ratio\n ) + self.dp(x, x_mask, g=g) * (1 - sdp_ratio)\n w = torch.exp(logw) * x_mask * length_scale\n w_ceil = torch.ceil(w)\n y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()\n y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(\n x_mask.dtype\n )\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = commons.generate_path(w_ceil, attn_mask)\n\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(\n 1, 2\n ) # [b, t', t], [b, t, d] -> [b, d, t']\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(\n 1, 2\n ) # [b, t', t], [b, t, d] -> [b, d, t']\n\n z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale\n z = self.flow(z_p, y_mask, g=g, reverse=True)\n o = self.dec((z * y_mask)[:, :, :max_len], g=g)\n return o, attn, y_mask, (z, z_p, m_p, logs_p)" }, { "identifier": "MultiPeriodDiscriminator", "path": "models.py", "snippet": "class MultiPeriodDiscriminator(torch.nn.Module):\n def __init__(self, use_spectral_norm=False):\n super(MultiPeriodDiscriminator, self).__init__()\n periods = [2, 3, 5, 7, 11]\n\n discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]\n discs = discs + [\n DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods\n ]\n self.discriminators = nn.ModuleList(discs)\n\n def forward(self, y, y_hat):\n y_d_rs = []\n y_d_gs = []\n fmap_rs = []\n fmap_gs = []\n for i, d in enumerate(self.discriminators):\n y_d_r, fmap_r = d(y)\n y_d_g, fmap_g = d(y_hat)\n y_d_rs.append(y_d_r)\n y_d_gs.append(y_d_g)\n fmap_rs.append(fmap_r)\n fmap_gs.append(fmap_g)\n\n return y_d_rs, y_d_gs, fmap_rs, fmap_gs" }, { "identifier": "DurationDiscriminator", "path": "models.py", "snippet": "class DurationDiscriminator(nn.Module): # vits2\n def __init__(\n self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0\n ):\n super().__init__()\n\n self.in_channels = in_channels\n self.filter_channels = filter_channels\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.gin_channels = gin_channels\n\n self.drop = nn.Dropout(p_dropout)\n self.conv_1 = nn.Conv1d(\n in_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.norm_1 = modules.LayerNorm(filter_channels)\n self.conv_2 = nn.Conv1d(\n filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.norm_2 = modules.LayerNorm(filter_channels)\n self.dur_proj = nn.Conv1d(1, filter_channels, 1)\n\n self.pre_out_conv_1 = nn.Conv1d(\n 2 * filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.pre_out_norm_1 = modules.LayerNorm(filter_channels)\n self.pre_out_conv_2 = nn.Conv1d(\n filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.pre_out_norm_2 = modules.LayerNorm(filter_channels)\n\n if gin_channels != 0:\n self.cond = nn.Conv1d(gin_channels, in_channels, 1)\n\n self.output_layer = nn.Sequential(nn.Linear(filter_channels, 1), nn.Sigmoid())\n\n def forward_probability(self, x, x_mask, dur, g=None):\n dur = self.dur_proj(dur)\n x = torch.cat([x, dur], dim=1)\n x = self.pre_out_conv_1(x * x_mask)\n x = torch.relu(x)\n x = self.pre_out_norm_1(x)\n x = self.drop(x)\n x = self.pre_out_conv_2(x * x_mask)\n x = torch.relu(x)\n x = self.pre_out_norm_2(x)\n x = self.drop(x)\n x = x * x_mask\n x = x.transpose(1, 2)\n output_prob = self.output_layer(x)\n return output_prob\n\n def forward(self, x, x_mask, dur_r, dur_hat, g=None):\n x = torch.detach(x)\n if g is not None:\n g = torch.detach(g)\n x = x 
+ self.cond(g)\n x = self.conv_1(x * x_mask)\n x = torch.relu(x)\n x = self.norm_1(x)\n x = self.drop(x)\n x = self.conv_2(x * x_mask)\n x = torch.relu(x)\n x = self.norm_2(x)\n x = self.drop(x)\n\n output_probs = []\n for dur in [dur_r, dur_hat]:\n output_prob = self.forward_probability(x, x_mask, dur, g)\n output_probs.append(output_prob)\n\n return output_probs" }, { "identifier": "generator_loss", "path": "losses.py", "snippet": "def generator_loss(disc_outputs):\n loss = 0\n gen_losses = []\n for dg in disc_outputs:\n dg = dg.float()\n l = torch.mean((1 - dg) ** 2)\n gen_losses.append(l)\n loss += l\n\n return loss, gen_losses" }, { "identifier": "discriminator_loss", "path": "losses.py", "snippet": "def discriminator_loss(disc_real_outputs, disc_generated_outputs):\n loss = 0\n r_losses = []\n g_losses = []\n for dr, dg in zip(disc_real_outputs, disc_generated_outputs):\n dr = dr.float()\n dg = dg.float()\n r_loss = torch.mean((1 - dr) ** 2)\n g_loss = torch.mean(dg**2)\n loss += r_loss + g_loss\n r_losses.append(r_loss.item())\n g_losses.append(g_loss.item())\n\n return loss, r_losses, g_losses" }, { "identifier": "feature_loss", "path": "losses.py", "snippet": "def feature_loss(fmap_r, fmap_g):\n loss = 0\n for dr, dg in zip(fmap_r, fmap_g):\n for rl, gl in zip(dr, dg):\n rl = rl.float().detach()\n gl = gl.float()\n loss += torch.mean(torch.abs(rl - gl))\n\n return loss * 2" }, { "identifier": "kl_loss", "path": "losses.py", "snippet": "def kl_loss(z_p, logs_q, m_p, logs_p, z_mask):\n \"\"\"\n z_p, logs_q: [b, h, t_t]\n m_p, logs_p: [b, h, t_t]\n \"\"\"\n z_p = z_p.float()\n logs_q = logs_q.float()\n m_p = m_p.float()\n logs_p = logs_p.float()\n z_mask = z_mask.float()\n\n kl = logs_p - logs_q - 0.5\n kl += 0.5 * ((z_p - m_p) ** 2) * torch.exp(-2.0 * logs_p)\n kl = torch.sum(kl * z_mask)\n l = kl / torch.sum(z_mask)\n return l" }, { "identifier": "mel_spectrogram_torch", "path": "mel_processing.py", "snippet": "def mel_spectrogram_torch(\n y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False\n):\n if torch.min(y) < -1.0:\n print(\"min value is \", torch.min(y))\n if torch.max(y) > 1.0:\n print(\"max value is \", torch.max(y))\n\n global mel_basis, hann_window\n dtype_device = str(y.dtype) + \"_\" + str(y.device)\n fmax_dtype_device = str(fmax) + \"_\" + dtype_device\n wnsize_dtype_device = str(win_size) + \"_\" + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(\n dtype=y.dtype, device=y.device\n )\n if wnsize_dtype_device not in hann_window:\n hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(\n dtype=y.dtype, device=y.device\n )\n\n y = torch.nn.functional.pad(\n y.unsqueeze(1),\n (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)),\n mode=\"reflect\",\n )\n y = y.squeeze(1)\n\n spec = torch.stft(\n y,\n n_fft,\n hop_length=hop_size,\n win_length=win_size,\n window=hann_window[wnsize_dtype_device],\n center=center,\n pad_mode=\"reflect\",\n normalized=False,\n onesided=True,\n return_complex=False,\n )\n\n spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)\n\n spec = torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n\n return spec" }, { "identifier": "spec_to_mel_torch", "path": "mel_processing.py", "snippet": "def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):\n global mel_basis\n dtype_device = str(spec.dtype) + \"_\" + str(spec.device)\n 
fmax_dtype_device = str(fmax) + \"_\" + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(\n dtype=spec.dtype, device=spec.device\n )\n spec = torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n return spec" }, { "identifier": "symbols", "path": "text/symbols.py", "snippet": "" } ]
import os
import torch
import torch.distributed as dist
import logging
import commons
import utils
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.cuda.amp import autocast, GradScaler
from tqdm import tqdm
from data_utils import (
    TextAudioSpeakerLoader,
    TextAudioSpeakerCollate,
    DistributedBucketSampler,
)
from models import (
    SynthesizerTrn,
    MultiPeriodDiscriminator,
    DurationDiscriminator,
)
from losses import generator_loss, discriminator_loss, feature_loss, kl_loss
from mel_processing import mel_spectrogram_torch, spec_to_mel_torch
from text.symbols import symbols
11,145
spec, spec_lengths, y, y_lengths, speakers, tone, language, bert, ja_bert, ) in tqdm(enumerate(train_loader)): if net_g.use_noise_scaled_mas: current_mas_noise_scale = ( net_g.mas_noise_scale_initial - net_g.noise_scale_delta * global_step ) net_g.current_mas_noise_scale = max(current_mas_noise_scale, 0.0) x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda( rank, non_blocking=True ) spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda( rank, non_blocking=True ) y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda( rank, non_blocking=True ) speakers = speakers.cuda(rank, non_blocking=True) tone = tone.cuda(rank, non_blocking=True) language = language.cuda(rank, non_blocking=True) bert = bert.cuda(rank, non_blocking=True) ja_bert = ja_bert.cuda(rank, non_blocking=True) with autocast(enabled=hps.train.fp16_run): ( y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_), ) = net_g( x, x_lengths, spec, spec_lengths, speakers, tone, language, bert, ja_bert, ) mel = spec_to_mel_torch( spec, hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.mel_fmin, hps.data.mel_fmax, ) y_mel = commons.slice_segments( mel, ids_slice, hps.train.segment_size // hps.data.hop_length ) y_hat_mel = mel_spectrogram_torch( y_hat.squeeze(1), hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.hop_length, hps.data.win_length, hps.data.mel_fmin, hps.data.mel_fmax, ) y = commons.slice_segments( y, ids_slice * hps.data.hop_length, hps.train.segment_size ) # slice # Discriminator y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach()) with autocast(enabled=False): loss_disc, losses_disc_r, losses_disc_g = discriminator_loss( y_d_hat_r, y_d_hat_g ) loss_disc_all = loss_disc if net_dur_disc is not None: y_dur_hat_r, y_dur_hat_g = net_dur_disc( hidden_x.detach(), x_mask.detach(), logw.detach(), logw_.detach() ) with autocast(enabled=False): # TODO: I think need to mean using the mask, but for now, just mean all ( loss_dur_disc, losses_dur_disc_r, losses_dur_disc_g, ) = discriminator_loss(y_dur_hat_r, y_dur_hat_g) loss_dur_disc_all = loss_dur_disc optim_dur_disc.zero_grad() scaler.scale(loss_dur_disc_all).backward() scaler.unscale_(optim_dur_disc) commons.clip_grad_value_(net_dur_disc.parameters(), None) scaler.step(optim_dur_disc) optim_d.zero_grad() scaler.scale(loss_disc_all).backward() scaler.unscale_(optim_d) grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None) scaler.step(optim_d) with autocast(enabled=hps.train.fp16_run): # Generator y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat) if net_dur_disc is not None: y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x, x_mask, logw, logw_) with autocast(enabled=False): loss_dur = torch.sum(l_length.float()) loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl loss_fm = feature_loss(fmap_r, fmap_g)
# flake8: noqa: E402 logging.getLogger("numba").setLevel(logging.WARNING) torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.allow_tf32 = ( True # If encontered training problem,please try to disable TF32. ) torch.set_float32_matmul_precision("medium") torch.backends.cudnn.benchmark = True torch.backends.cuda.sdp_kernel("flash") torch.backends.cuda.enable_flash_sdp(True) torch.backends.cuda.enable_mem_efficient_sdp( True ) # Not available if torch version is lower than 2.0 torch.backends.cuda.enable_math_sdp(True) global_step = 0 def run(): #dist.init_process_group( # backend="gloo", # init_method="env://", # Due to some training problem,we proposed to use gloo instead of nccl. #) # Use torchrun instead of mp.spawn #rank = dist.get_rank() #n_gpus = dist.get_world_size() rank = 0 n_gpus = 1 hps = utils.get_hparams() torch.manual_seed(hps.train.seed) torch.cuda.set_device(rank) global global_step if rank == 0: logger = utils.get_logger(hps.model_dir) logger.info(hps) utils.check_git_hash(hps.model_dir) writer = SummaryWriter(log_dir=hps.model_dir) writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval")) train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data) train_sampler = DistributedBucketSampler( train_dataset, hps.train.batch_size, [32, 300, 400, 500, 600, 700, 800, 900, 1000], num_replicas=n_gpus, rank=rank, shuffle=True, ) collate_fn = TextAudioSpeakerCollate() train_loader = DataLoader( train_dataset, num_workers=16, shuffle=False, pin_memory=True, collate_fn=collate_fn, batch_sampler=train_sampler, persistent_workers=True, prefetch_factor=4, ) # DataLoader config could be adjusted. if rank == 0: eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data) eval_loader = DataLoader( eval_dataset, num_workers=0, shuffle=False, batch_size=1, pin_memory=True, drop_last=False, collate_fn=collate_fn, ) if ( "use_noise_scaled_mas" in hps.model.keys() and hps.model.use_noise_scaled_mas is True ): print("Using noise scaled MAS for VITS2") mas_noise_scale_initial = 0.01 noise_scale_delta = 2e-6 else: print("Using normal MAS for VITS1") mas_noise_scale_initial = 0.0 noise_scale_delta = 0.0 if ( "use_duration_discriminator" in hps.model.keys() and hps.model.use_duration_discriminator is True ): print("Using duration discriminator for VITS2") net_dur_disc = DurationDiscriminator( hps.model.hidden_channels, hps.model.hidden_channels, 3, 0.1, gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0, ).cuda(rank) if ( "use_spk_conditioned_encoder" in hps.model.keys() and hps.model.use_spk_conditioned_encoder is True ): if hps.data.n_speakers == 0: raise ValueError( "n_speakers must be > 0 when using spk conditioned encoder to train multi-speaker model" ) else: print("Using normal encoder for VITS1") net_g = SynthesizerTrn( len(symbols), hps.data.filter_length // 2 + 1, hps.train.segment_size // hps.data.hop_length, n_speakers=hps.data.n_speakers, mas_noise_scale_initial=mas_noise_scale_initial, noise_scale_delta=noise_scale_delta, **hps.model, ).cuda(rank) net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank) optim_g = torch.optim.AdamW( filter(lambda p: p.requires_grad, net_g.parameters()), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) optim_d = torch.optim.AdamW( net_d.parameters(), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps, ) if net_dur_disc is not None: optim_dur_disc = torch.optim.AdamW( net_dur_disc.parameters(), hps.train.learning_rate, 
betas=hps.train.betas, eps=hps.train.eps, ) else: optim_dur_disc = None # net_g = DDP(net_g, device_ids=[rank], find_unused_parameters=True) # net_d = DDP(net_d, device_ids=[rank], find_unused_parameters=True) # if net_dur_disc is not None: # net_dur_disc = DDP(net_dur_disc, device_ids=[rank], find_unused_parameters=True) try: if net_dur_disc is not None: _, _, dur_resume_lr, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(hps.model_dir, "DUR_*.pth"), net_dur_disc, optim_dur_disc, skip_optimizer=hps.train.skip_optimizer if "skip_optimizer" in hps.train else True, ) _, optim_g, g_resume_lr, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g, skip_optimizer=hps.train.skip_optimizer if "skip_optimizer" in hps.train else True, ) _, optim_d, d_resume_lr, epoch_str = utils.load_checkpoint( utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, optim_d, skip_optimizer=hps.train.skip_optimizer if "skip_optimizer" in hps.train else True, ) if not optim_g.param_groups[0].get("initial_lr"): optim_g.param_groups[0]["initial_lr"] = g_resume_lr if not optim_d.param_groups[0].get("initial_lr"): optim_d.param_groups[0]["initial_lr"] = d_resume_lr if not optim_dur_disc.param_groups[0].get("initial_lr"): optim_dur_disc.param_groups[0]["initial_lr"] = dur_resume_lr epoch_str = max(epoch_str, 1) global_step = (epoch_str - 1) * len(train_loader) except Exception as e: print(e) epoch_str = 1 global_step = 0 scheduler_g = torch.optim.lr_scheduler.ExponentialLR( optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 ) scheduler_d = torch.optim.lr_scheduler.ExponentialLR( optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 ) if net_dur_disc is not None: # if not optim_dur_disc.param_groups[0].get("initial_lr"): # optim_dur_disc.param_groups[0]["initial_lr"] = dur_resume_lr scheduler_dur_disc = torch.optim.lr_scheduler.ExponentialLR( optim_dur_disc, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2 ) else: scheduler_dur_disc = None scaler = GradScaler(enabled=hps.train.fp16_run) for epoch in range(epoch_str, hps.train.epochs + 1): if rank == 0: train_and_evaluate( rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, eval_loader], logger, [writer, writer_eval], ) else: train_and_evaluate( rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, None], None, None, ) scheduler_g.step() scheduler_d.step() if net_dur_disc is not None: scheduler_dur_disc.step() def train_and_evaluate( rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers ): net_g, net_d, net_dur_disc = nets optim_g, optim_d, optim_dur_disc = optims scheduler_g, scheduler_d, scheduler_dur_disc = schedulers train_loader, eval_loader = loaders if writers is not None: writer, writer_eval = writers train_loader.batch_sampler.set_epoch(epoch) global global_step net_g.train() net_d.train() if net_dur_disc is not None: net_dur_disc.train() for batch_idx, ( x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert, ja_bert, ) in tqdm(enumerate(train_loader)): if net_g.use_noise_scaled_mas: current_mas_noise_scale = ( net_g.mas_noise_scale_initial - net_g.noise_scale_delta * global_step ) net_g.current_mas_noise_scale = max(current_mas_noise_scale, 0.0) x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda( rank, 
non_blocking=True ) spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda( rank, non_blocking=True ) y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda( rank, non_blocking=True ) speakers = speakers.cuda(rank, non_blocking=True) tone = tone.cuda(rank, non_blocking=True) language = language.cuda(rank, non_blocking=True) bert = bert.cuda(rank, non_blocking=True) ja_bert = ja_bert.cuda(rank, non_blocking=True) with autocast(enabled=hps.train.fp16_run): ( y_hat, l_length, attn, ids_slice, x_mask, z_mask, (z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_), ) = net_g( x, x_lengths, spec, spec_lengths, speakers, tone, language, bert, ja_bert, ) mel = spec_to_mel_torch( spec, hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.mel_fmin, hps.data.mel_fmax, ) y_mel = commons.slice_segments( mel, ids_slice, hps.train.segment_size // hps.data.hop_length ) y_hat_mel = mel_spectrogram_torch( y_hat.squeeze(1), hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.hop_length, hps.data.win_length, hps.data.mel_fmin, hps.data.mel_fmax, ) y = commons.slice_segments( y, ids_slice * hps.data.hop_length, hps.train.segment_size ) # slice # Discriminator y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach()) with autocast(enabled=False): loss_disc, losses_disc_r, losses_disc_g = discriminator_loss( y_d_hat_r, y_d_hat_g ) loss_disc_all = loss_disc if net_dur_disc is not None: y_dur_hat_r, y_dur_hat_g = net_dur_disc( hidden_x.detach(), x_mask.detach(), logw.detach(), logw_.detach() ) with autocast(enabled=False): # TODO: I think need to mean using the mask, but for now, just mean all ( loss_dur_disc, losses_dur_disc_r, losses_dur_disc_g, ) = discriminator_loss(y_dur_hat_r, y_dur_hat_g) loss_dur_disc_all = loss_dur_disc optim_dur_disc.zero_grad() scaler.scale(loss_dur_disc_all).backward() scaler.unscale_(optim_dur_disc) commons.clip_grad_value_(net_dur_disc.parameters(), None) scaler.step(optim_dur_disc) optim_d.zero_grad() scaler.scale(loss_disc_all).backward() scaler.unscale_(optim_d) grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None) scaler.step(optim_d) with autocast(enabled=hps.train.fp16_run): # Generator y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat) if net_dur_disc is not None: y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x, x_mask, logw, logw_) with autocast(enabled=False): loss_dur = torch.sum(l_length.float()) loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl loss_fm = feature_loss(fmap_r, fmap_g)
loss_gen, losses_gen = generator_loss(y_d_hat_g)
6
2023-10-16 10:04:32+00:00
16k
violet-sto/HN-GFN
main.py
[ { "identifier": "Dataset", "path": "dataset.py", "snippet": "class Dataset:\n\n def __init__(self, args, bpath, oracle, device):\n self.test_split_rng = np.random.RandomState(142857)\n self.train_rng = np.random.RandomState(int(time.time()))\n self.train_mols = []\n self.test_mols = []\n self.all_mols = []\n self.train_mols_map = {}\n\n self.mdp = MolMDPExtended(bpath)\n self.mdp.post_init(device, args.proxy_repr_type, include_nblocks=args.include_nblocks)\n self.mdp.build_translation_table()\n if args.floatX == 'float64':\n self.mdp.floatX = torch.double\n else:\n self.mdp.floatX = torch.float\n self.mdp._cue_max_blocks = args.max_blocks\n self.max_blocks = args.max_blocks\n self.oracle = oracle\n self._device = device\n self.seen_molecules = set()\n self.stop_event = threading.Event()\n\n self.target_norm = [-8.6, 1.10] # for dockerscore\n\n self.hypervolume = Hypervolume(ref_point=torch.zeros(len(args.objectives)))\n\n def load_h5(self, path, test_ratio=0.1, num_init_examples=None):\n import json\n columns = [\"smiles\", \"dockscore\",\"blockidxs\", \"slices\", \"jbonds\", \"stems\"]\n store = pd.HDFStore(path, 'r')\n df = store.select('df')\n # Pandas has problem with calculating some stuff on float16\n df.dockscore = df.dockscore.astype(\"float64\")\n for cl_mame in columns[2:]:\n df.loc[:, cl_mame] = df[cl_mame].apply(json.loads)\n\n test_idxs = self.test_split_rng.choice(\n len(df), int(test_ratio * len(df)), replace=False)\n\n split_bool = np.zeros(len(df), dtype=np.bool)\n split_bool[test_idxs] = True\n self.scores = []\n self.smis = []\n for i in tqdm(range(len(df))):\n m = BlockMoleculeDataExtended()\n for c in range(1, len(columns)):\n setattr(m, columns[c], df.iloc[i, c - 1])\n m.blocks = [self.mdp.block_mols[i] for i in m.blockidxs]\n if len(m.blocks) > self.max_blocks:\n continue\n m.numblocks = len(m.blocks)\n m.score = self.oracle.get_score([m])\n self.scores.append(m.score)\n self.smis.append(m.smiles)\n self.all_mols.append(m)\n if split_bool[i]: \n self.test_mols.append(m)\n else:\n self.train_mols.append(m)\n if len(self.train_mols)+len(self.test_mols) >= num_init_examples:\n break\n store.close()\n\n print(\"Sampling initial {} molecules from all {} molecules...\".format(\n num_init_examples, len(split_bool)))\n print(len(self.train_mols), 'train mols')\n print(len(self.test_mols), 'test mols')\n\n def r2r(self, dockscore=None, normscore=None):\n if dockscore is not None:\n normscore = 4-(min(0, dockscore) -\n self.target_norm[0])/self.target_norm[1]\n normscore = max(0.1, normscore)\n return (normscore/1) ** 1\n\n def _get(self, i, dset):\n return [(dset[i], dset[i].score)]\n\n def sample(self, n):\n eidx = np.random.randint(0, len(self.train_mols), n)\n samples = sum((self._get(i, self.train_mols) for i in eidx), [])\n\n return zip(*samples)\n\n def sample2batch(self, mb):\n s, r = mb\n s = self.mdp.mols2batch([self.mdp.mol2repr(i) for i in s])\n r = torch.tensor(pd.DataFrame.from_dict(\n r).values, device=self._device).float()\n return (s, r)\n\n def iterset(self, n, mode):\n if mode == 'test':\n dset = self.test_mols\n elif mode == 'train':\n dset = self.train_mols\n\n N = len(dset)\n for i in range(int(np.ceil(N/n))):\n samples = sum((self._get(j, dset)\n for j in range(i*n, min(N, (i+1)*n))), [])\n yield self.sample2batch(zip(*samples))\n\n def add_samples(self, batch):\n picked_mols, scores, picked_smis = batch\n\n for m in picked_mols:\n if np.random.uniform() < (1/10):\n self.test_mols.append(m)\n else:\n self.train_mols.append(m)\n 
self.all_mols.append(m)\n \n self.scores += scores\n self.smis += [smis[-1] for smis in picked_smis]\n \n self.stop_event.clear()\n\n def compute_hypervolume(self):\n scores = torch.tensor(pd.DataFrame.from_dict(self.scores).values)\n volume = self.hypervolume.compute(scores)\n\n return volume\n \n def start_samplers(self, n, mbsize):\n self.ready_events = [threading.Event() for i in range(n)]\n self.resume_events = [threading.Event() for i in range(n)]\n self.results = [None] * n\n def f(idx):\n while not self.stop_event.is_set():\n try:\n self.results[idx] = self.sample2batch(self.sample(mbsize))\n except Exception as e:\n print(\"Exception while sampling:\")\n print(e)\n self.sampler_threads[idx].failed = True\n self.sampler_threads[idx].exception = e\n self.ready_events[idx].set()\n break\n self.ready_events[idx].set()\n self.resume_events[idx].clear()\n self.resume_events[idx].wait()\n self.sampler_threads = [threading.Thread(target=f, args=(i,)) for i in range(n)]\n [setattr(i, 'failed', False) for i in self.sampler_threads]\n [i.start() for i in self.sampler_threads]\n round_robin_idx = [0]\n def get():\n while True:\n idx = round_robin_idx[0]\n round_robin_idx[0] = (round_robin_idx[0] + 1) % n\n if self.ready_events[idx].is_set():\n r = self.results[idx]\n self.ready_events[idx].clear()\n self.resume_events[idx].set()\n return r\n elif round_robin_idx[0] == 0:\n time.sleep(0.001)\n return get\n\n def stop_samplers_and_join(self):\n self.stop_event.set()\n if hasattr(self, 'sampler_threads'):\n while any([i.is_alive() for i in self.sampler_threads]):\n [i.set() for i in self.resume_events]\n [i.join(0.05) for i in self.sampler_threads]" }, { "identifier": "MolMDPExtended", "path": "mol_mdp_ext.py", "snippet": "class MolMDPExtended(MolMDP):\n\n def build_translation_table(self):\n \"\"\"build a symmetry mapping for blocks. Necessary to compute parent transitions\"\"\"\n self.translation_table = {}\n for blockidx in range(len(self.block_mols)):\n # Blocks have multiple ways of being attached. By default,\n # a new block is attached to the target stem by attaching\n # it's kth atom, where k = block_rs[new_block_idx][0].\n # When computing a reverse action (from a parent), we may\n # wish to attach the new block to a different atom. In\n # the blocks library, there are duplicates of the same\n # block but with block_rs[block][0] set to a different\n # atom. Thus, for the reverse action we have to find out\n # which duplicate this corresponds to.\n\n # Here, we compute, for block blockidx, what is the index\n # of the duplicate block, if someone wants to attach to\n # atom x of the block.\n # So atom_map[x] == bidx, such that block_rs[bidx][0] == x\n atom_map = {}\n for j in range(len(self.block_mols)):\n if self.block_smi[blockidx] == self.block_smi[j]:\n atom_map[self.block_rs[j][0]] = j\n self.translation_table[blockidx] = atom_map\n\n # We're still missing some \"duplicates\", as some might be\n # symmetric versions of each other. For example, block CC with\n # block_rs == [0,1] has no duplicate, because the duplicate\n # with block_rs [1,0] would be a symmetric version (both C\n # atoms are the \"same\").\n\n # To test this, let's create nonsense molecules by attaching\n # duplicate blocks to a Gold atom, and testing whether they\n # are the same.\n gold = Chem.MolFromSmiles('[Au]')\n # If we find that two molecules are the same when attaching\n # them with two different atoms, then that means the atom\n # numbers are symmetries. 
We can add those to the table.\n for blockidx in range(len(self.block_mols)):\n for j in self.block_rs[blockidx]:\n if j not in self.translation_table[blockidx]:\n symmetric_duplicate = None\n for atom, block_duplicate in self.translation_table[blockidx].items():\n molA, _ = chem.mol_from_frag(\n jun_bonds=[[0,1,0,j]],\n frags=[gold, self.block_mols[blockidx]])\n molB, _ = chem.mol_from_frag(\n jun_bonds=[[0,1,0,atom]],\n frags=[gold, self.block_mols[blockidx]])\n if (Chem.MolToSmiles(molA) == Chem.MolToSmiles(molB) or\n molA.HasSubstructMatch(molB)):\n symmetric_duplicate = block_duplicate\n break\n if symmetric_duplicate is None:\n raise ValueError('block', blockidx, self.block_smi[blockidx],\n 'has no duplicate for atom', j,\n 'in position 0, and no symmetrical correspondance')\n self.translation_table[blockidx][j] = symmetric_duplicate\n #print('block', blockidx, '+ atom', j,\n # 'in position 0 is a symmetric duplicate of',\n # symmetric_duplicate)\n\n def parents(self, mol=None):\n \"\"\"returns all the possible parents of molecule mol (or the current\n molecule if mol is None.\n\n Returns a list of (BlockMoleculeDataExtended, (block_idx, stem_idx)) pairs such that\n for a pair (m, (b, s)), MolMDPExtended.add_block_to(m, b, s) == mol.\n \"\"\"\n if len(mol.blockidxs) == 1:\n # If there's just a single block, then the only parent is\n # the empty block with the action that recreates that block\n return [(BlockMoleculeDataExtended(), (mol.blockidxs[0], 0))]\n\n # Compute the how many blocks each block is connected to\n blocks_degree = defaultdict(int)\n for a,b,_,_ in mol.jbonds:\n blocks_degree[a] += 1\n blocks_degree[b] += 1\n # Keep only blocks of degree 1 (those are the ones that could\n # have just been added)\n blocks_degree_1 = [i for i, d in blocks_degree.items() if d == 1]\n # Form new molecules without these blocks\n parent_mols = []\n\n for rblockidx in blocks_degree_1:\n new_mol = mol.copy()\n # find which bond we're removing\n removed_bonds = [(jbidx, bond) for jbidx, bond in enumerate(new_mol.jbonds)\n if rblockidx in bond[:2]]\n assert len(removed_bonds) == 1\n rjbidx, rbond = removed_bonds[0]\n # Pop the bond\n new_mol.jbonds.pop(rjbidx)\n # Remove the block\n mask = np.ones(len(new_mol.blockidxs), dtype=np.bool)\n mask[rblockidx] = 0\n reindex = new_mol.delete_blocks(mask)\n # reindex maps old blockidx to new blockidx, since the\n # block the removed block was attached to might have its\n # index shifted by 1.\n\n # Compute which stem the bond was using\n stem = ([reindex[rbond[0]], rbond[2]] if rblockidx == rbond[1] else\n [reindex[rbond[1]], rbond[3]])\n # and add it back\n new_mol.stems = [list(i) for i in new_mol.stems] + [stem]\n #new_mol.stems.append(stem)\n # and we have a parent. 
The stem idx to recreate mol is\n # the last stem, since we appended `stem` in the back of\n # the stem list.\n # We also have to translate the block id to match the bond\n # we broke, see build_translation_table().\n removed_stem_atom = (\n rbond[3] if rblockidx == rbond[1] else rbond[2])\n blockid = mol.blockidxs[rblockidx]\n if removed_stem_atom not in self.translation_table[blockid]:\n raise ValueError('Could not translate removed stem to duplicate or symmetric block.')\n parent_mols.append([new_mol,\n # action = (block_idx, stem_idx)\n (self.translation_table[blockid][removed_stem_atom],\n len(new_mol.stems) - 1)])\n if not len(parent_mols):\n raise ValueError('Could not find any parents')\n return parent_mols\n\n\n def add_block_to(self, mol, block_idx, stem_idx=None, atmidx=None):\n '''out-of-place version of add_block'''\n #assert (block_idx >= 0) and (block_idx <= len(self.block_mols)), \"unknown block\"\n if mol.numblocks == 0:\n stem_idx = None\n new_mol = mol.copy()\n new_mol.add_block(block_idx,\n block=self.block_mols[block_idx],\n block_r=self.block_rs[block_idx],\n stem_idx=stem_idx, atmidx=atmidx)\n return new_mol\n\n def remove_jbond_from(self, mol, jbond_idx=None, atmidx=None):\n new_mol = mol.copy()\n new_mol.remove_jbond(jbond_idx, atmidx)\n return new_mol\n\n def a2mol(self, acts):\n mol = BlockMoleculeDataExtended()\n for i in acts:\n if i[0] >= 0:\n mol = self.add_block_to(mol, *i)\n return mol\n\n def reset(self):\n self.molecule = BlockMoleculeDataExtended()\n return None\n\n\n def post_init(self, device, repr_type, include_bonds=False, include_nblocks=False):\n self.device = device\n self.repr_type = repr_type\n #self.max_bond_atmidx = max([max(i) for i in self.block_rs])\n self.max_num_atm = max(self.block_natm)\n # see model_block.mol2graph\n self.true_block_set = sorted(set(self.block_smi))\n self.stem_type_offset = np.int32([0] + list(np.cumsum([\n max(self.block_rs[self.block_smi.index(i)])+1 for i in self.true_block_set])))\n self.num_stem_types = self.stem_type_offset[-1]\n self.true_blockidx = [self.true_block_set.index(i) for i in self.block_smi]\n self.num_true_blocks = len(self.true_block_set)\n self.include_nblocks = include_nblocks\n self.include_bonds = include_bonds\n #print(self.max_num_atm, self.num_stem_types)\n self.molcache = {}\n\n def mols2batch(self, mols):\n if self.repr_type == 'block_graph':\n return model_block.mols2batch(mols, self)\n elif self.repr_type == 'atom_graph':\n return model_atom.mols2batch(mols, self)\n elif self.repr_type == 'morgan_fingerprint':\n return model_fingerprint.mols2batch(mols, self)\n\n def mol2repr(self, mol=None):\n if mol is None:\n mol = self.molecule\n #molhash = str(mol.blockidxs)+':'+str(mol.stems)+':'+str(mol.jbonds)\n #if molhash in self.molcache:\n # return self.molcache[molhash]\n if self.repr_type == 'block_graph':\n r = model_block.mol2graph(mol, self, self.floatX)\n elif self.repr_type == 'atom_graph':\n r = model_atom.mol2graph(mol, self, self.floatX,\n bonds=self.include_bonds,\n nblocks=self.include_nblocks)\n elif self.repr_type == 'morgan_fingerprint':\n r = model_fingerprint.mol2fp(mol, self, self.floatX)\n #self.molcache[molhash] = r\n return r\n\n def get_nx_graph(self, mol: BlockMoleculeData, true_block=False):\n true_blockidx = self.true_blockidx\n\n G = nx.DiGraph()\n blockidxs = [true_blockidx[xx] for xx in mol.blockidxs] if true_block else mol.blockidxs\n\n G.add_nodes_from([(ix, {\"block\": blockidxs[ix]}) for ix in range(len(blockidxs))])\n\n if len(mol.jbonds) > 0:\n edges = []\n 
for jbond in mol.jbonds:\n edges.append((jbond[0], jbond[1],\n {\"bond\": [jbond[2], jbond[3]]}))\n edges.append((jbond[1], jbond[0],\n {\"bond\": [jbond[3], jbond[2]]}))\n G.add_edges_from(edges)\n return G\n\n def graphs_are_isomorphic(self, g1, g2):\n return nx.algorithms.is_isomorphic(g1, g2, node_match=node_match, edge_match=edge_match)" }, { "identifier": "BlockMoleculeDataExtended", "path": "mol_mdp_ext.py", "snippet": "class BlockMoleculeDataExtended(BlockMoleculeData):\n\n @property\n def mol(self):\n return chem.mol_from_frag(jun_bonds=self.jbonds, frags=self.blocks)[0]\n\n @property\n def smiles(self):\n return Chem.MolToSmiles(self.mol)\n\n def copy(self): # shallow copy\n o = BlockMoleculeDataExtended()\n o.blockidxs = list(self.blockidxs)\n o.blocks = list(self.blocks)\n o.slices = list(self.slices)\n o.numblocks = self.numblocks\n o.jbonds = list(self.jbonds)\n o.stems = list(self.stems)\n return o\n\n def as_dict(self):\n return {'blockidxs': self.blockidxs,\n 'slices': self.slices,\n 'numblocks': self.numblocks,\n 'jbonds': self.jbonds,\n 'stems': self.stems}" }, { "identifier": "Oracle", "path": "oracle/oracle.py", "snippet": "class Oracle():\n def __init__(self, args, mols_ref=None):\n '''\n @params:\n args (dict): argsurations\n '''\n self.objectives = args.objectives\n self.fps_ref = [AllChem.GetMorganFingerprintAsBitVect(x, 3, 2048) \n for x in mols_ref] if mols_ref else None\n self.device = torch.device(args.device)\n\n def batch_get_scores(self, mols):\n '''\n @params:\n mols: molecules to estimate score\n @return:\n dicts (list): list of score dictionaries\n '''\n dicts = [{} for _ in mols]\n for obj in self.objectives:\n scores = get_scores(obj, mols, device=self.device)\n for i, mol in enumerate(mols):\n dicts[i][obj] = scores[i]\n return dicts\n \n def get_score(self, mol):\n scores = {}\n for obj in self.objectives:\n score = get_scores(obj, mol, device=self.device)\n scores[obj] = score[0]\n \n return scores" }, { "identifier": "get_proxy", "path": "proxy/proxy.py", "snippet": "def get_proxy(args, bpath, oracle):\n if args.acq_fn.lower() == 'none':\n return NoAF(args, bpath, oracle)\n\n elif args.acq_fn.lower() == 'ucb':\n return UCB(args, bpath, oracle)\n \n elif args.acq_fn.lower() == 'ucb_chebyshev':\n return UCB_chebyshev(args, bpath, oracle)\n\n elif args.acq_fn.lower() == 'ei':\n return EI(args, bpath, oracle)" }, { "identifier": "FMGFlowNet", "path": "generator/gfn.py", "snippet": "class FMGFlowNet(nn.Module):\n def __init__(self, args, bpath):\n super().__init__()\n self.args = args\n mdp = MolMDPExtended(bpath)\n mdp.post_init(args.device, args.repr_type,\n include_nblocks=args.include_nblocks)\n mdp.build_translation_table()\n self.model = make_model(args, mdp, is_proxy=False)\n self.opt = torch.optim.Adam(self.model.parameters(\n ), args.learning_rate, weight_decay=args.weight_decay)\n\n self.loginf = 1000 # to prevent nans\n self.log_reg_c = args.log_reg_c\n self.balanced_loss = args.balanced_loss\n self.do_nblocks_reg = False\n self.max_blocks = args.max_blocks\n self.leaf_coef = args.leaf_coef\n self.clip_grad = args.clip_grad\n # self.score_criterion = nn.MSELoss(reduction='none')\n self.score_criterion = nn.MSELoss()\n\n def forward(self, graph_data, vec_data=None, do_stems=True):\n return self.model(graph_data, vec_data, do_stems)\n\n def train_step(self, p, pb, a, pw, w, r, s, d, mols, i):\n loss, term_loss, flow_loss = self.FMLoss(p, pb, a, pw, w, r, s, d)\n\n self.opt.zero_grad()\n loss.backward()\n if self.clip_grad > 0:\n 
torch.nn.utils.clip_grad_norm_(\n self.model.parameters(), self.clip_grad)\n self.opt.step()\n self.model.training_steps = i+1\n \n return (loss.item(), term_loss.item(), flow_loss.item())\n\n def FMLoss(self, p, pb, a, pw, w, r, s, d):\n # Since we sampled 'mbsize' trajectories, we're going to get\n # roughly mbsize * H (H is variable) transitions\n ntransitions = r.shape[0]\n # state outputs\n stem_out_s, mol_out_s = self.model(s, w) # log(F)\n # parents of the state outputs\n stem_out_p, mol_out_p = self.model(p, pw)\n # index parents by their corresponding actions\n qsa_p = self.model.index_output_by_action(\n p, stem_out_p, mol_out_p[:, 0], a)\n # then sum the parents' contribution, this is the inflow\n exp_inflow = (torch.zeros((ntransitions,), device=qsa_p.device, dtype=qsa_p.dtype)\n .index_add_(0, pb, torch.exp(qsa_p))) # pb is the parents' batch index\n inflow = torch.log(exp_inflow + self.log_reg_c)\n # sum the state's Q(s,a), this is the outflow\n exp_outflow = self.model.sum_output(s, torch.exp(\n stem_out_s), torch.exp(mol_out_s[:, 0]))\n # include reward and done multiplier, then take the log\n # we're guarenteed that r > 0 iff d = 1, so the log always works\n outflow_plus_r = torch.log(self.log_reg_c + r + exp_outflow * (1-d))\n if self.do_nblocks_reg:\n losses = _losses = ((inflow - outflow_plus_r) /\n (s.nblocks * self.max_blocks)).pow(2)\n else:\n losses = _losses = (inflow - outflow_plus_r).pow(2)\n\n term_loss = (losses * d).sum() / (d.sum() + 1e-20) # terminal nodes\n flow_loss = (losses * (1-d)).sum() / \\\n ((1-d).sum() + 1e-20) # non-terminal nodes\n \n if self.balanced_loss:\n loss = term_loss * self.leaf_coef + flow_loss\n else:\n loss = losses.mean()\n\n return loss, term_loss, flow_loss" }, { "identifier": "TBGFlowNet", "path": "generator/gfn.py", "snippet": "class TBGFlowNet(nn.Module):\n def __init__(self, args, bpath):\n super().__init__()\n self.args = args\n self.mdp = MolMDPExtended(bpath)\n self.mdp.post_init(args.device, args.repr_type,\n include_nblocks=args.include_nblocks)\n self.mdp.build_translation_table()\n self.model = make_model(args, self.mdp, is_proxy=False)\n self.Z = nn.Sequential(nn.Linear(len(args.objectives), args.nemb//2), nn.LeakyReLU(),\n nn.Linear(args.nemb//2, 1))\n self.Z.to(args.device)\n self.opt = torch.optim.Adam(self.model.parameters(), args.learning_rate, weight_decay=args.weight_decay)\n self.opt_Z = torch.optim.Adam(self.Z.parameters(), args.Z_learning_rate, weight_decay=args.weight_decay)\n\n def forward(self, graph_data, vec_data=None, do_stems=True):\n return self.model(graph_data, vec_data, do_stems)\n\n def train_step(self, p, pb, a, pw, w, r, s, d, mols, i):\n loss = self.TBLoss(p, a, w, r, d, mols)\n self.opt.zero_grad()\n self.opt_Z.zero_grad()\n loss.backward()\n if self.args.clip_grad > 0:\n torch.nn.utils.clip_grad_norm_(\n self.model.parameters(), self.args.clip_grad)\n self.opt.step()\n self.opt_Z.step()\n\n return (loss.item(),)\n\n @property\n def Z(self):\n return self.model.Z\n\n def TBLoss(self, p, a, w, r, d, mols):\n # logit\n stem_out_p, mol_out_p = self.model(p, w)\n # index parents by their corresponding actions\n logits = -self.model.action_negloglikelihood(\n p, a, stem_out_p, mol_out_p)\n\n b = torch.cat([torch.tensor([0], device=logits.device),\n torch.cumsum(d.long(), 0)[:-1]], dim=0)\n n = torch.tensor([len(self.mdp.parents(mol)) if a[idx, 0].item() != -1 else 1.\n for idx, mol in enumerate(mols[1])], device=logits.device)\n # n = torch.tensor([len(self.mdp.parents(mol)) for mol in mols[1]], 
device=logits.device)\n forward_ll = scatter(logits, b, reduce='sum')\n backward_ll = scatter(torch.log(1/n), b, reduce='sum')\n\n losses = ((self.Z(w[d==1.]) + forward_ll) - (torch.log(r[d == 1.]) + backward_ll)).pow(2) \n loss = losses.mean()\n\n return loss" }, { "identifier": "circle_points", "path": "utils/metrics.py", "snippet": "def circle_points(K, min_angle=None, max_angle=None):\n # generate evenly distributed preference vector\n ang0 = 1e-6 if min_angle is None else min_angle\n ang1 = np.pi / 2 - ang0 if max_angle is None else max_angle\n angles = np.linspace(ang0, ang1, K, endpoint=True)\n x = np.cos(angles)\n y = np.sin(angles)\n weights = np.c_[x, y]\n normalized_weights = weights/weights.sum(1, keepdims=True)\n\n return normalized_weights.astype(np.float32)" }, { "identifier": "compute_success", "path": "utils/metrics.py", "snippet": "def compute_success(mols, scores, objectives, score_succ):\n print(\"Computing successful rate...\")\n positive_mols = []\n success_dict = {k: 0. for k in objectives}\n\n for mol, score in zip(mols, scores):\n all_success = True\n for k, v in score.items():\n if v >= score_succ[k]:\n success_dict[k] += 1\n else:\n all_success = False\n if all_success:\n positive_mols.append(mol)\n\n success = 1.*len(positive_mols)/len(mols)\n\n return success, positive_mols" }, { "identifier": "compute_diversity", "path": "utils/metrics.py", "snippet": "def compute_diversity(mols):\n print(\"Computing diversity...\")\n\n if len(mols) == 0:\n return 0\n\n sims = []\n fps = [AllChem.GetMorganFingerprintAsBitVect(x.mol, 3, 2048) for x in mols]\n for i in range(len(fps)):\n sims += DataStructs.BulkTanimotoSimilarity(fps[i], fps[:i])\n\n return 1 - np.mean(sims)" }, { "identifier": "compute_novelty", "path": "utils/metrics.py", "snippet": "def compute_novelty(mols, ref_mols):\n print(\"Computing novelty...\")\n positive_fps = [AllChem.GetMorganFingerprintAsBitVect(\n x.mol, 3, 2048) for x in mols]\n ref_fps = [AllChem.GetMorganFingerprintAsBitVect(\n x, 3, 2048) for x in ref_mols]\n\n n_sim = 0.\n for i in range(len(positive_fps)):\n sims = DataStructs.BulkTanimotoSimilarity(positive_fps[i], ref_fps)\n if max(sims) >= 0.4:\n n_sim += 1\n novelty = 1. - 1. 
* n_sim / (len(positive_fps)+1e-6)\n\n return novelty" }, { "identifier": "evaluate", "path": "utils/metrics.py", "snippet": "def evaluate(args, generator, rollout_worker, k):\n time_start = time.time()\n print(f\"Sampling molecules and evaluating...\")\n test_weights = rollout_worker.test_weights\n picked_mols = []\n all_scores = []\n # top_scores = []\n top_scores = defaultdict(list)\n mean_scores = []\n hypervolume = Hypervolume(ref_point=torch.zeros(len(args.objectives)))\n \n for weights in test_weights:\n sampled_mols = []\n rewards = []\n scores = []\n for i in range(args.num_samples):\n rollout_worker.rollout(\n generator, use_rand_policy=False, weights=weights.unsqueeze(0))\n (raw_r, _, m, _, _) = rollout_worker.sampled_mols[-1]\n sampled_mols.append(m)\n rewards.append(raw_r[0])\n scores.append(raw_r[1])\n\n idx_pick = np.argsort(rewards)[::-1][:k] \n picked_mols += np.array(sampled_mols)[idx_pick].tolist()\n top_rewards = np.array(rewards)[idx_pick]\n mean_scores.append(np.array(scores).mean(0))\n \n picked_scores = np.array(scores)[idx_pick]\n weight_specific_volume = hypervolume.compute(torch.tensor(picked_scores))\n print(f'Hypervolume w.r.t test weights {weights}: {weight_specific_volume}')\n \n for K in [10, 100]:\n scores_np = np.array(scores)\n top_scores_weight = [scores_np[np.argsort(scores_np[:,i])[::-1][:K], i].mean() for i in range(len(args.objectives))]\n top_scores[K].append(top_scores_weight)\n print(f'Top {K} scores w.r.t test weights {weights}: {top_scores_weight}')\n \n all_scores += scores\n print('Top_rewards: {}'.format(top_rewards.mean())) # Top-100 rewards\n \n volume = hypervolume.compute(torch.tensor(all_scores))\n diversity = compute_diversity(picked_mols) # Top-100\n\n print('Hypervolume: {}, Diversity: {}, Time: {}'.format(\n volume, diversity, time.time()-time_start))\n\n return volume, diversity" }, { "identifier": "compute_correlation", "path": "utils/metrics.py", "snippet": "def compute_correlation(args, model, rollout_worker, test_mols):\n\n mdp = rollout_worker.mdp\n device = args.device\n def tf(x): return torch.tensor(x, device=device).to(torch.float)\n def tint(x): return torch.tensor(x, device=device).long()\n\n # test_mols = pickle.load(gzip.open('data/some_mols_U_1k.pkl.gz'))\n logsoftmax = nn.LogSoftmax(0)\n corrs = []\n numblocks = []\n\n start_time = time.time()\n if args.n_objectives == 3:\n test_weights = rollout_worker.test_weights[::2]\n elif args.n_objectives == 4:\n test_weights = rollout_worker.test_weights[1:-2:4]\n else:\n test_weights = rollout_worker.test_weights\n \n for weights in test_weights:\n print(\"Computing correlation w.r.t test weights {}\".format(weights))\n weights = torch.tensor(weights).to(args.device)\n logp = []\n rewards = []\n for m in tqdm(test_mols):\n try:\n agraph = get_mol_path_graph(m, mdp)\n except:\n continue\n # rewards.append(np.log(moli[0][0]))\n reward = rollout_worker._get_reward(m, weights)[0].item()\n rewards.append(np.log(reward))\n s = mdp.mols2batch([mdp.mol2repr(agraph.nodes[i]['mol'])\n for i in agraph.nodes])\n numblocks.append(len(m.blocks))\n with torch.no_grad():\n # get the mols_out_s for ALL molecules not just the end one.\n if args.condition_type == 'Hyper_scorepred':\n stem_out_s, mol_out_s, _ = model(\n s, weights.repeat(s.num_graphs, 1))\n else:\n stem_out_s, mol_out_s = model(\n s, weights.repeat(s.num_graphs, 1))\n per_mol_out = []\n # Compute pi(a|s)\n for j in range(len(agraph.nodes)):\n a, b = s._slice_dict['stems'][j:j+2]\n\n stop_allowed = len(\n 
agraph.nodes[j]['mol'].blocks) >= args.min_blocks\n mp = logsoftmax(torch.cat([\n stem_out_s[a:b].reshape(-1),\n # If num_blocks < min_blocks, the model is not allowed to stop\n mol_out_s[j, :1] if stop_allowed else tf([-1000])]))\n per_mol_out.append(\n (mp[:-1].reshape((-1, stem_out_s.shape[1])), mp[-1]))\n\n # When the model reaches 8 blocks, it is stopped automatically. If instead it stops before\n # that, we need to take into account the STOP action's logprob\n if len(m.blocks) < 8:\n if args.condition_type == 'Hyper_scorepred':\n stem_out_last, mol_out_last, _ = model(\n mdp.mols2batch([mdp.mol2repr(m)]), weights.unsqueeze(0))\n else:\n stem_out_last, mol_out_last = model(\n mdp.mols2batch([mdp.mol2repr(m)]), weights.unsqueeze(0)) \n mplast = logsoftmax(\n torch.cat([stem_out_last.reshape(-1), mol_out_last[0, :1]]))\n MSTOP = mplast[-1]\n\n # assign logprob to edges\n for u, v in agraph.edges:\n a = agraph.edges[u, v]['action']\n if a[0] == -1:\n agraph.edges[u, v]['logprob'] = per_mol_out[v][1]\n else:\n agraph.edges[u,\n v]['logprob'] = per_mol_out[v][0][a[1], a[0]]\n\n # propagate logprobs through the graph\n for n in list(nx.topological_sort(agraph))[::-1]:\n for c in agraph.predecessors(n):\n if len(m.blocks) < 8 and c == 0:\n agraph.nodes[c]['logprob'] = torch.logaddexp(\n agraph.nodes[c].get('logprob', tf(-1000)),\n agraph.edges[c, n]['logprob'] + agraph.nodes[n].get('logprob', 0) + MSTOP)\n else:\n agraph.nodes[c]['logprob'] = torch.logaddexp(\n agraph.nodes[c].get('logprob', tf(-1000)),\n agraph.edges[c, n]['logprob'] + agraph.nodes[n].get('logprob', 0))\n\n # add the first item\n # logp.append((moli, agraph.nodes[n]['logprob'].item()))\n logp.append(agraph.nodes[n]['logprob'].item())\n corrs.append(stats.spearmanr(rewards, logp).correlation)\n\n print('Spearmanr: {}, mean: {}, Time: {}'.format(corrs, np.mean(corrs), time.time()-start_time))\n return corrs" }, { "identifier": "set_random_seed", "path": "utils/utils.py", "snippet": "def set_random_seed(seed, deterministic=True):\n \"\"\"Set random seed.\"\"\"\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n if deterministic:\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False" }, { "identifier": "get_logger", "path": "utils/logging.py", "snippet": "def get_logger(args):\n if args.enable_tensorboard:\n return TensorboardLogger(args)\n else:\n return Logger(args)" } ]
from curses import raw
from dataset import Dataset
from mol_mdp_ext import MolMDPExtended, BlockMoleculeDataExtended
from oracle.oracle import Oracle
from proxy import get_proxy
from generator import TBGFlowNet, FMGFlowNet
from utils.metrics import circle_points, compute_success, compute_diversity, compute_novelty, evaluate, compute_correlation
from utils.utils import set_random_seed
from utils.logging import get_logger
from datetime import datetime
from botorch.utils.multi_objective.hypervolume import Hypervolume
from botorch.utils.sampling import sample_simplex
from botorch.utils.transforms import normalize, unnormalize
from torch.distributions.dirichlet import Dirichlet
from rdkit.Chem import AllChem
from rdkit import DataStructs
from pymoo.util.ref_dirs import get_reference_directions
import os
import argparse
import json
import time
import threading
import pdb
import pickle
import gzip
import warnings
import torch.multiprocessing as mp
import torch.nn.functional as F
import torch
import pandas as pd
import numpy as np
12,252
stop_everything() pdb.post_mortem(thread.exception.__traceback__) return p, pb, a, pw, w, r, s, d, mols = r else: p, pb, a, pw, w, r, s, d, mols = rollout_worker.sample2batch( rollout_worker.execute_train_episode_batch(generator, dataset, use_rand_policy=True)) loss = generator.train_step(p, pb, a, pw, w, r, s, d, mols, i) last_losses.append(loss) if not i % 100: train_loss = [np.round(np.mean(loss), 3) for loss in zip(*last_losses)] train_losses.append(train_loss) args.logger.add_scalar( 'Loss/train', train_loss[0], use_context=False) print('Iter {}: Loss {}, Time {}'.format( i, train_loss, round(time.time() - time_last_check, 3))) time_last_check = time.time() last_losses = [] if not i % args.sample_iterations and i != 0: volume, diversity = evaluate(args, generator, rollout_worker, 100) corrs = compute_correlation(args, generator, rollout_worker, rollout_worker.test_mols) args.logger.add_scalar( 'Top-100-sampled/volumes', volume, use_context=False) args.logger.add_scalar( 'Top-100-sampled/dists', diversity, use_context=False) args.logger.add_scalar( 'Top-100-sampled/corr', np.mean(corrs), use_context=False) if do_save: save_stuff(i) if volume > best_hv: best_hv = volume if do_save: save_stuff('volume') if np.mean(corrs) > best_corr: best_corr = np.mean(corrs) if do_save: save_stuff('corr') stop_everything() if do_save: save_stuff(i) return rollout_worker, {'train_losses': train_losses, 'test_losses': test_losses, 'test_infos': test_infos, 'train_infos': train_infos} def get_test_mols(args, mdp, num): samples = [] fps = [] early_stops = [] while len(samples) < num: if len(samples) % 5000 == 0: print(f'{len(samples)}/{num} mols have been sampled') m = BlockMoleculeDataExtended() min_blocks = args.min_blocks max_blocks = args.max_blocks early_stop_at = np.random.randint(min_blocks, max_blocks + 1) early_stops.append(early_stop_at) for t in range(max_blocks): if t == 0: length = mdp.num_blocks+1 else: length = len(m.stems)*mdp.num_blocks+1 action = np.random.randint(1, length) if t == early_stop_at: action = 0 if t >= min_blocks and action == 0: fp = AllChem.GetMorganFingerprintAsBitVect(m.mol, 3, 2048) if len(samples)==0: samples.append(m) fps.append(fp) else: sims = DataStructs.BulkTanimotoSimilarity(fp, fps) if max(sims) < 0.7: samples.append(m) fps.append(fp) break else: action = max(0, action-1) action = (action % mdp.num_blocks, action // mdp.num_blocks) #print('..', action) m = mdp.add_block_to(m, *action) if len(m.blocks) and not len(m.stems) or t == max_blocks - 1: # can't add anything more to this mol so let's make it # terminal. Note that this node's parent isn't just m, # because this is a sink for all parent transitions fp = AllChem.GetMorganFingerprintAsBitVect(m.mol, 3, 2048) if len(samples)==0: samples.append(m) fps.append(fp) else: sims = DataStructs.BulkTanimotoSimilarity(fp, fps) if max(sims) < 0.7: samples.append(m) fps.append(fp) break return samples def get_test_rays(): if args.n_objectives == 3: n_partitions = 6 elif args.n_objectives == 4: n_partitions = 7 test_rays = get_reference_directions("das-dennis", args.n_objectives, n_partitions=n_partitions).astype(np.float32) test_rays = test_rays[[(r > 0).all() for r in test_rays]] print(f"initialize {len(test_rays)} test rays") return test_rays def main(args):
warnings.filterwarnings('ignore') def arg_parse(): parser = argparse.ArgumentParser() parser.add_argument("--device", type=str, default='cuda') parser.add_argument('--seed', type=int, default=42, help='seed') parser.add_argument("--run", default=0, help="run", type=int) parser.add_argument('--save', action='store_true', default=False, help='Save model.') parser.add_argument('--debug', action='store_true', default=False, help='debug mode, no multi thread') parser.add_argument("--enable_tensorboard", action='store_true', default=False) parser.add_argument("--log_dir", default='runs/synthetic') parser.add_argument("--include_nblocks", default=False) parser.add_argument("--num_samples", default=1000, type=int) parser.add_argument("--floatX", default='float32') parser.add_argument('--sample_iterations', type=int, default=1000, help='sample mols and compute metrics') # objectives parser.add_argument("--objectives", type=str, default='gsk3b,jnk3') parser.add_argument("--scalar", default='WeightedSum', type=str) #TODO: other scalars parser.add_argument("--alpha", default=1., type=float, help='dirichlet distribution') parser.add_argument("--alpha_vector", default='1,1', type=str) # GFlowNet parser.add_argument("--min_blocks", default=2, type=int) parser.add_argument("--max_blocks", default=8, type=int) parser.add_argument("--num_iterations", default=30000, type=int) # 30k parser.add_argument("--criterion", default="FM", type=str) parser.add_argument("--learning_rate", default=5e-4, help="Learning rate", type=float) parser.add_argument("--Z_learning_rate", default=5e-3, help="Learning rate", type=float) parser.add_argument("--clip_grad", default=0, type=float) parser.add_argument("--trajectories_mbsize", default=16, type=int) parser.add_argument("--offline_mbsize", default=0, type=int) parser.add_argument("--hindsight_mbsize", default=0, type=int) parser.add_argument("--reward_min", default=1e-2, type=float) parser.add_argument("--reward_norm", default=0.8, type=float) parser.add_argument("--reward_exp", default=6, type=float) parser.add_argument("--reward_exp_ramping", default=0, type=float) # Hyperparameters for TB parser.add_argument("--partition_init", default=30, type=float) # Hyperparameters for FM parser.add_argument("--log_reg_c", default=(0.1/8) ** 4, type=float) # (0.1/8)**8 parser.add_argument("--balanced_loss", default=True) parser.add_argument("--leaf_coef", default=10, type=float) # Architecture parser.add_argument("--repr_type", default='block_graph') parser.add_argument("--model_version", default='v4') parser.add_argument("--condition_type", default='HN', type=str) # 'HN', 'FiLM', 'concat' parser.add_argument("--num_conv_steps", default=10, type=int) parser.add_argument("--nemb", default=256, help="#hidden", type=int) parser.add_argument("--weight_decay", default=0, type=float) parser.add_argument("--random_action_prob", default=0.05, type=float) parser.add_argument("--bootstrap_tau", default=0, type=float) parser.add_argument("--ray_hidden_dim", default=100, type=int) parser.add_argument("--logit_clipping", default=0., type=float) return parser.parse_args() class RolloutWorker: def __init__(self, args, bpath, proxy, device): self.args = args self.test_split_rng = np.random.RandomState(142857) self.train_rng = np.random.RandomState(int(time.time())) self.mdp = MolMDPExtended(bpath) self.mdp.post_init(device, args.repr_type, include_nblocks=args.include_nblocks) self.mdp.build_translation_table() if args.floatX == 'float64': self.mdp.floatX = self.floatX = torch.double else: 
self.mdp.floatX = self.floatX = torch.float self.proxy = proxy self._device = device self.seen_molecules = set() self.stop_event = threading.Event() ####### # This is the "result", here a list of (reward, BlockMolDataExt, info...) tuples self.sampled_mols = [] self.online_mols = [] self.hindsight_mols = [] self.max_online_mols = 1000 self.max_hindsight_mols = 1000 self.min_blocks = args.min_blocks self.max_blocks = args.max_blocks self.mdp._cue_max_blocks = self.max_blocks self.reward_exp = args.reward_exp self.reward_min = args.reward_min self.reward_norm = args.reward_norm self.reward_exp_ramping = args.reward_exp_ramping self.random_action_prob = args.random_action_prob # If True this basically implements Buesing et al's TreeSample Q, # samples uniformly from it though, no MTCS involved if args.criterion == 'TB' or args.criterion == "Reinforce": self.ignore_parents = True elif args.criterion == 'FM': self.ignore_parents = False def rollout(self, generator, use_rand_policy=True, weights=None, replay=False): weights = Dirichlet(torch.ones(len(self.args.objectives))*self.args.alpha).sample_n(1).to( self.args.device) if weights is None else weights m = BlockMoleculeDataExtended() samples = [] max_blocks = self.max_blocks trajectory_stats = [] for t in range(max_blocks): s = self.mdp.mols2batch([self.mdp.mol2repr(m)]) s_o, m_o = generator(s, vec_data=weights, do_stems=True) # fix from run 330 onwards if t < self.min_blocks: m_o = m_o*0 - 1000 # prevent assigning prob to stop # when we can't stop ## logits = torch.cat([m_o.reshape(-1), s_o.reshape(-1)]) cat = torch.distributions.Categorical( logits=logits) action = cat.sample().item() if use_rand_policy and self.random_action_prob > 0: # just for training if self.train_rng.uniform() < self.random_action_prob: action = self.train_rng.randint( int(t < self.min_blocks), logits.shape[0]) q = torch.cat([m_o.reshape(-1), s_o.reshape(-1)]) trajectory_stats.append( (q[action].item(), action, torch.logsumexp(q, 0).item())) if t >= self.min_blocks and action == 0: r, raw_r = self._get_reward(m, weights) # r: reward, raw_r: scores for the objectives samples.append(((m,), ((-1, 0),), weights, weights, r, m, 1)) break else: action = max(0, action-1) action = (action % self.mdp.num_blocks, action // self.mdp.num_blocks) m_old = m m = self.mdp.add_block_to(m, *action) if len(m.blocks) and not len(m.stems) or t == max_blocks - 1: # can't add anything more to this mol so let's make it # terminal. 
Note that this node's parent isn't just m, # because this is a sink for all parent transitions r, raw_r = self._get_reward(m, weights) if self.ignore_parents: samples.append( ((m_old,), (action,), weights, weights, r, m, 1)) else: parents, actions = zip(*self.mdp.parents(m)) samples.append((parents, actions, weights.repeat( len(parents), 1), weights, r, m, 1)) break else: if self.ignore_parents: samples.append( ((m_old,), (action,), weights, weights, 0, m, 0)) else: parents, actions = zip(*self.mdp.parents(m)) samples.append( (parents, actions, weights.repeat(len(parents), 1), weights, 0, m, 0)) p = self.mdp.mols2batch([self.mdp.mol2repr(i) for i in samples[-1][0]]) qp = generator(p, weights.repeat(p.num_graphs, 1)) qsa_p = generator.model.index_output_by_action( p, qp[0], qp[1][:, 0], torch.tensor(samples[-1][1], device=self._device).long()) inflow = torch.logsumexp(qsa_p.flatten(), 0).item() self.sampled_mols.append( ([i.cpu().numpy() for i in raw_r], weights.cpu().numpy(), m, trajectory_stats, inflow)) if replay and self.args.hindsight_prob > 0.0: self._add_mol_to_replay(m) return samples def _get_reward(self, m, weights=None): rdmol = m.mol if rdmol is None: return self.reward_min # get scores from oracle score = self.proxy.get_score([m]) score = torch.tensor(list(score.values())).to(self.args.device) if self.args.scalar == 'WeightedSum': raw_reward = (weights*score).sum() elif self.args.scalar == 'Tchebycheff': raw_reward = (weights*score).min() + 0.1 * (weights*score).sum() reward = self.l2r(raw_reward.clip(self.reward_min)) return reward, (raw_reward, score) def execute_train_episode_batch(self, generator, dataset=None, use_rand_policy=True): if self.args.condition_type is None: weights = self.test_weights # train specific model else: weights = Dirichlet(torch.tensor(self.args.alpha_vector)*self.args.alpha).sample_n(1).to(self.args.device) #* sample weights per batch, seem better samples = sum((self.rollout(generator, use_rand_policy, weights) for i in range(self.args.trajectories_mbsize)), []) return zip(*samples) def sample2batch(self, mb): p, a, p_weights, weights, r, s, d, *o = mb mols = (p, s) # The batch index of each parent p_batch = torch.tensor(sum([[i]*len(p) for i, p in enumerate(p)], []), device=self._device).long() # Convert all parents and states to repr. 
Note that this # concatenates all the parent lists, which is why we need # p_batch p = self.mdp.mols2batch(list(map(self.mdp.mol2repr, sum(p, ())))) s = self.mdp.mols2batch([self.mdp.mol2repr(i) for i in s]) # Concatenate all the actions (one per parent per sample) a = torch.tensor(sum(a, ()), device=self._device).long() # rewards and dones r = torch.tensor(r, device=self._device).to(self.floatX) d = torch.tensor(d, device=self._device).to(self.floatX) # weights p_w = torch.cat(p_weights, 0) w = torch.cat(weights, 0) return (p, p_batch, a, p_w, w, r, s, d, mols, *o) def l2r(self, raw_reward, t=0): if self.reward_exp_ramping > 0: reward_exp = 1 + (self.reward_exp - 1) * \ (1 - 1/(1 + t / self.reward_exp_ramping)) # when t=0, exp = 1; t->∞, exp = self.reward_exp else: reward_exp = self.reward_exp reward = (raw_reward/self.reward_norm)**reward_exp return reward def start_samplers(self, generator, n, dataset): self.ready_events = [threading.Event() for i in range(n)] self.resume_events = [threading.Event() for i in range(n)] self.results = [None] * n def f(idx): while not self.stop_event.is_set(): try: self.results[idx] = self.sample2batch( self.execute_train_episode_batch(generator, dataset, use_rand_policy=True)) except Exception as e: print("Exception while sampling:") print(e) self.sampler_threads[idx].failed = True self.sampler_threads[idx].exception = e self.ready_events[idx].set() break self.ready_events[idx].set() self.resume_events[idx].clear() self.resume_events[idx].wait() self.sampler_threads = [threading.Thread( target=f, args=(i,)) for i in range(n)] [setattr(i, 'failed', False) for i in self.sampler_threads] [i.start() for i in self.sampler_threads] round_robin_idx = [0] def get(): while True: idx = round_robin_idx[0] round_robin_idx[0] = (round_robin_idx[0] + 1) % n if self.ready_events[idx].is_set(): r = self.results[idx] self.ready_events[idx].clear() self.resume_events[idx].set() return r elif round_robin_idx[0] == 0: time.sleep(0.001) return get def stop_samplers_and_join(self): self.stop_event.set() if hasattr(self, 'sampler_threads'): while any([i.is_alive() for i in self.sampler_threads]): [i.set() for i in self.resume_events] [i.join(0.05) for i in self.sampler_threads] def train_generative_model_with_oracle(args, generator, bpath, oracle, test_weights, dataset=None, do_save=False): print("Training generator...") device = args.device rollout_worker = RolloutWorker(args, bpath, oracle, device) if args.condition_type is None: rollout_worker.test_weights = torch.tensor(test_weights).to(device)[args.run :args.run+1] else: rollout_worker.test_weights = torch.tensor(test_weights).to(device) rollout_worker.test_mols = pickle.load(gzip.open('./data/test_mols_6062.pkl.gz', 'rb')) def save_stuff(iter): torch.save(generator.state_dict(), os.path.join( args.log_dir, '{}_generator_checkpoint.pth'.format(iter))) pickle.dump(rollout_worker.sampled_mols, gzip.open(f'{args.log_dir}/sampled_mols.pkl.gz', 'wb')) multi_thread = not args.debug if multi_thread: sampler = rollout_worker.start_samplers(generator, 8, dataset) def stop_everything(): print('joining') rollout_worker.stop_samplers_and_join() last_losses = [] train_losses = [] test_losses = [] test_infos = [] train_infos = [] best_hv = 0 best_corr = 0 time_last_check = time.time() for i in range(args.num_iterations + 1): rollout_worker.reward_exp = 1 + (args.reward_exp-1) * (1-1/(1+i/20)) if multi_thread: r = sampler() for thread in rollout_worker.sampler_threads: if thread.failed: stop_everything() 
pdb.post_mortem(thread.exception.__traceback__) return p, pb, a, pw, w, r, s, d, mols = r else: p, pb, a, pw, w, r, s, d, mols = rollout_worker.sample2batch( rollout_worker.execute_train_episode_batch(generator, dataset, use_rand_policy=True)) loss = generator.train_step(p, pb, a, pw, w, r, s, d, mols, i) last_losses.append(loss) if not i % 100: train_loss = [np.round(np.mean(loss), 3) for loss in zip(*last_losses)] train_losses.append(train_loss) args.logger.add_scalar( 'Loss/train', train_loss[0], use_context=False) print('Iter {}: Loss {}, Time {}'.format( i, train_loss, round(time.time() - time_last_check, 3))) time_last_check = time.time() last_losses = [] if not i % args.sample_iterations and i != 0: volume, diversity = evaluate(args, generator, rollout_worker, 100) corrs = compute_correlation(args, generator, rollout_worker, rollout_worker.test_mols) args.logger.add_scalar( 'Top-100-sampled/volumes', volume, use_context=False) args.logger.add_scalar( 'Top-100-sampled/dists', diversity, use_context=False) args.logger.add_scalar( 'Top-100-sampled/corr', np.mean(corrs), use_context=False) if do_save: save_stuff(i) if volume > best_hv: best_hv = volume if do_save: save_stuff('volume') if np.mean(corrs) > best_corr: best_corr = np.mean(corrs) if do_save: save_stuff('corr') stop_everything() if do_save: save_stuff(i) return rollout_worker, {'train_losses': train_losses, 'test_losses': test_losses, 'test_infos': test_infos, 'train_infos': train_infos} def get_test_mols(args, mdp, num): samples = [] fps = [] early_stops = [] while len(samples) < num: if len(samples) % 5000 == 0: print(f'{len(samples)}/{num} mols have been sampled') m = BlockMoleculeDataExtended() min_blocks = args.min_blocks max_blocks = args.max_blocks early_stop_at = np.random.randint(min_blocks, max_blocks + 1) early_stops.append(early_stop_at) for t in range(max_blocks): if t == 0: length = mdp.num_blocks+1 else: length = len(m.stems)*mdp.num_blocks+1 action = np.random.randint(1, length) if t == early_stop_at: action = 0 if t >= min_blocks and action == 0: fp = AllChem.GetMorganFingerprintAsBitVect(m.mol, 3, 2048) if len(samples)==0: samples.append(m) fps.append(fp) else: sims = DataStructs.BulkTanimotoSimilarity(fp, fps) if max(sims) < 0.7: samples.append(m) fps.append(fp) break else: action = max(0, action-1) action = (action % mdp.num_blocks, action // mdp.num_blocks) #print('..', action) m = mdp.add_block_to(m, *action) if len(m.blocks) and not len(m.stems) or t == max_blocks - 1: # can't add anything more to this mol so let's make it # terminal. Note that this node's parent isn't just m, # because this is a sink for all parent transitions fp = AllChem.GetMorganFingerprintAsBitVect(m.mol, 3, 2048) if len(samples)==0: samples.append(m) fps.append(fp) else: sims = DataStructs.BulkTanimotoSimilarity(fp, fps) if max(sims) < 0.7: samples.append(m) fps.append(fp) break return samples def get_test_rays(): if args.n_objectives == 3: n_partitions = 6 elif args.n_objectives == 4: n_partitions = 7 test_rays = get_reference_directions("das-dennis", args.n_objectives, n_partitions=n_partitions).astype(np.float32) test_rays = test_rays[[(r > 0).all() for r in test_rays]] print(f"initialize {len(test_rays)} test rays") return test_rays def main(args):
set_random_seed(args.seed)
13
2023-10-24 14:10:35+00:00
16k
SALT-NLP/Efficient_Unlearning
src/models/transformers/parameter-efficient-finetuning/wrappers/configuration.py
[ { "identifier": "PretrainedConfig", "path": "src/models/transformers/configuration_utils.py", "snippet": "class PretrainedConfig(PushToHubMixin):\n r\"\"\"\n Base class for all configuration classes. Handles a few parameters common to all models' configurations as well as\n methods for loading/downloading/saving configurations.\n\n <Tip>\n\n A configuration file can be loaded and saved to disk. Loading the configuration file and using this file to\n initialize a model does **not** load the model weights. It only affects the model's configuration.\n\n </Tip>\n\n Class attributes (overridden by derived classes):\n\n - **model_type** (`str`) -- An identifier for the model type, serialized into the JSON file, and used to recreate\n the correct object in [`~transformers.AutoConfig`].\n - **is_composition** (`bool`) -- Whether the config class is composed of multiple sub-configs. In this case the\n config has to be initialized from two or more configs of type [`~transformers.PretrainedConfig`] like:\n [`~transformers.EncoderDecoderConfig`] or [`~RagConfig`].\n - **keys_to_ignore_at_inference** (`List[str]`) -- A list of keys to ignore by default when looking at dictionary\n outputs of the model during inference.\n - **attribute_map** (`Dict[str, str]`) -- A dict that maps model specific attribute names to the standardized\n naming of attributes.\n\n Common attributes (present in all subclasses):\n\n - **vocab_size** (`int`) -- The number of tokens in the vocabulary, which is also the first dimension of the\n embeddings matrix (this attribute may be missing for models that don't have a text modality like ViT).\n - **hidden_size** (`int`) -- The hidden size of the model.\n - **num_attention_heads** (`int`) -- The number of attention heads used in the multi-head attention layers of the\n model.\n - **num_hidden_layers** (`int`) -- The number of blocks in the model.\n\n Arg:\n name_or_path (`str`, *optional*, defaults to `\"\"`):\n Store the string that was passed to [`PreTrainedModel.from_pretrained`] or\n [`TFPreTrainedModel.from_pretrained`] as `pretrained_model_name_or_path` if the configuration was created\n with such a method.\n output_hidden_states (`bool`, *optional*, defaults to `False`):\n Whether or not the model should return all hidden-states.\n output_attentions (`bool`, *optional*, defaults to `False`):\n Whether or not the model should returns all attentions.\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return a [`~transformers.utils.ModelOutput`] instead of a plain tuple.\n is_encoder_decoder (`bool`, *optional*, defaults to `False`):\n Whether the model is used as an encoder/decoder or not.\n is_decoder (`bool`, *optional*, defaults to `False`):\n Whether the model is used as decoder or not (in which case it's used as an encoder).\n cross_attention_hidden_size** (`bool`, *optional*):\n The hidden size of the cross-attention layer in case the model is used as a decoder in an encoder-decoder\n setting and the cross-attention hidden dimension differs from `self.config.hidden_size`.\n add_cross_attention (`bool`, *optional*, defaults to `False`):\n Whether cross-attention layers should be added to the model. Note, this option is only relevant for models\n that can be used as decoder models within the [`EncoderDecoderModel`] class, which consists of all models\n in `AUTO_MODELS_FOR_CAUSAL_LM`.\n tie_encoder_decoder (`bool`, *optional*, defaults to `False`):\n Whether all encoder weights should be tied to their equivalent decoder weights. 
This requires the encoder\n and decoder model to have the exact same parameter names.\n prune_heads (`Dict[int, List[int]]`, *optional*, defaults to `{}`):\n Pruned heads of the model. The keys are the selected layer indices and the associated values, the list of\n heads to prune in said layer.\n\n For instance `{1: [0, 2], 2: [2, 3]}` will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2.\n chunk_size_feed_forward (`int`, *optional*, defaults to `0`):\n The chunk size of all feed forward layers in the residual attention blocks. A chunk size of `0` means that\n the feed forward layer is not chunked. A chunk size of n means that the feed forward layer processes `n` <\n sequence_length embeddings at a time. For more information on feed forward chunking, see [How does Feed\n Forward Chunking work?](../glossary.html#feed-forward-chunking).\n\n > Parameters for sequence generation\n\n max_length (`int`, *optional*, defaults to 20):\n Maximum length that will be used by default in the `generate` method of the model.\n min_length (`int`, *optional*, defaults to 10):\n Minimum length that will be used by default in the `generate` method of the model.\n do_sample (`bool`, *optional*, defaults to `False`):\n Flag that will be used by default in the `generate` method of the model. Whether or not to use sampling ;\n use greedy decoding otherwise.\n early_stopping (`bool`, *optional*, defaults to `False`):\n Flag that will be used by default in the `generate` method of the model. Whether to stop the beam search\n when at least `num_beams` sentences are finished per batch or not.\n num_beams (`int`, *optional*, defaults to 1):\n Number of beams for beam search that will be used by default in the `generate` method of the model. 1 means\n no beam search.\n num_beam_groups (`int`, *optional*, defaults to 1):\n Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams\n that will be used by default in the `generate` method of the model. 1 means no group beam search.\n diversity_penalty (`float`, *optional*, defaults to 0.0):\n Value to control diversity for group beam search. that will be used by default in the `generate` method of\n the model. 0 means no diversity penalty. The higher the penalty, the more diverse are the outputs.\n temperature (`float`, *optional*, defaults to 1):\n The value used to module the next token probabilities that will be used by default in the `generate` method\n of the model. Must be strictly positive.\n top_k (`int`, *optional*, defaults to 50):\n Number of highest probability vocabulary tokens to keep for top-k-filtering that will be used by default in\n the `generate` method of the model.\n top_p (`float`, *optional*, defaults to 1):\n Value that will be used by default in the `generate` method of the model for `top_p`. If set to float < 1,\n only the most probable tokens with probabilities that add up to `top_p` or higher are kept for generation.\n repetition_penalty (`float`, *optional*, defaults to 1):\n Parameter for repetition penalty that will be used by default in the `generate` method of the model. 1.0\n means no penalty.\n length_penalty (`float`, *optional*, defaults to 1):\n Exponential penalty to the length that will be used by default in the `generate` method of the model.\n no_repeat_ngram_size (`int`, *optional*, defaults to 0) -- Value that will be used by default in the\n `generate` method of the model for `no_repeat_ngram_size`. 
If set to int > 0, all ngrams of that size can\n only occur once.\n encoder_no_repeat_ngram_size (`int`, *optional*, defaults to 0) -- Value that will be used by\n default in the `generate` method of the model for `encoder_no_repeat_ngram_size`. If set to int > 0, all\n ngrams of that size that occur in the `encoder_input_ids` cannot occur in the `decoder_input_ids`.\n bad_words_ids (`List[int]`, *optional*):\n List of token ids that are not allowed to be generated that will be used by default in the `generate`\n method of the model. In order to get the tokens of the words that should not appear in the generated text,\n use `tokenizer.encode(bad_word, add_prefix_space=True)`.\n num_return_sequences (`int`, *optional*, defaults to 1):\n Number of independently computed returned sequences for each element in the batch that will be used by\n default in the `generate` method of the model.\n output_scores (`bool`, *optional*, defaults to `False`):\n Whether the model should return the logits when used for generation.\n return_dict_in_generate (`bool`, *optional*, defaults to `False`):\n Whether the model should return a [`~transformers.utils.ModelOutput`] instead of a `torch.LongTensor`.\n forced_bos_token_id (`int`, *optional*):\n The id of the token to force as the first generated token after the `decoder_start_token_id`. Useful for\n multilingual models like [mBART](../model_doc/mbart) where the first generated token needs to be the target\n language token.\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached.\n remove_invalid_values (`bool`, *optional*):\n Whether to remove possible _nan_ and _inf_ outputs of the model to prevent the generation method to crash.\n Note that using `remove_invalid_values` can slow down generation.\n\n > Parameters for fine-tuning tasks\n\n architectures (`List[str]`, *optional*):\n Model architectures that can be used with the model pretrained weights.\n finetuning_task (`str`, *optional*):\n Name of the task used to fine-tune the model. This can be used when converting from an original (TensorFlow\n or PyTorch) checkpoint.\n id2label (`Dict[int, str]`, *optional*):\n A map from index (for instance prediction index, or target index) to label.\n label2id (`Dict[str, int]`, *optional*): A map from label to index for the model.\n num_labels (`int`, *optional*):\n Number of labels to use in the last layer added to the model, typically for a classification task.\n task_specific_params (`Dict[str, Any]`, *optional*):\n Additional keyword arguments to store for the current task.\n problem_type (`str`, *optional*):\n Problem type for `XxxForSequenceClassification` models. 
Can be one of `\"regression\"`,\n `\"single_label_classification\"` or `\"multi_label_classification\"`.\n\n > Parameters linked to the tokenizer\n\n tokenizer_class (`str`, *optional*):\n The name of the associated tokenizer class to use (if none is set, will use the tokenizer associated to the\n model by default).\n prefix (`str`, *optional*):\n A specific prompt that should be added at the beginning of each text before calling the model.\n bos_token_id (`int`, *optional*): The id of the _beginning-of-stream_ token.\n pad_token_id (`int`, *optional*): The id of the _padding_ token.\n eos_token_id (`int`, *optional*): The id of the _end-of-stream_ token.\n decoder_start_token_id (`int`, *optional*):\n If an encoder-decoder model starts decoding with a different token than _bos_, the id of that token.\n sep_token_id (`int`, *optional*): The id of the _separation_ token.\n\n > PyTorch specific parameters\n\n torchscript (`bool`, *optional*, defaults to `False`):\n Whether or not the model should be used with Torchscript.\n tie_word_embeddings (`bool`, *optional*, defaults to `True`):\n Whether the model's input and output word embeddings should be tied. Note that this is only relevant if the\n model has a output word embedding layer.\n torch_dtype (`str`, *optional*):\n The `dtype` of the weights. This attribute can be used to initialize the model to a non-default `dtype`\n (which is normally `float32`) and thus allow for optimal storage allocation. For example, if the saved\n model is `float16`, ideally we want to load it back using the minimal amount of memory needed to load\n `float16` weights. Since the config object is stored in plain text, this attribute contains just the\n floating type string without the `torch.` prefix. For example, for `torch.float16` ``torch_dtype` is the\n `\"float16\"` string.\n\n This attribute is currently not being used during model loading time, but this may change in the future\n versions. But we can already start preparing for the future by saving the dtype with save_pretrained.\n\n > TensorFlow specific parameters\n\n use_bfloat16 (`bool`, *optional*, defaults to `False`):\n Whether or not the model should use BFloat16 scalars (only used by some TensorFlow models).\n tf_legacy_loss (`bool`, *optional*, defaults to `False`):\n Whether the model should use legacy TensorFlow losses. Legacy losses have variable output shapes and may\n not be XLA-compatible. 
This option is here for backward compatibility and will be removed in Transformers\n v5.\n \"\"\"\n model_type: str = \"\"\n is_composition: bool = False\n attribute_map: Dict[str, str] = {}\n _auto_class: Optional[str] = None\n\n def __setattr__(self, key, value):\n if key in super().__getattribute__(\"attribute_map\"):\n key = super().__getattribute__(\"attribute_map\")[key]\n super().__setattr__(key, value)\n\n def __getattribute__(self, key):\n if key != \"attribute_map\" and key in super().__getattribute__(\"attribute_map\"):\n key = super().__getattribute__(\"attribute_map\")[key]\n return super().__getattribute__(key)\n\n def __init__(self, **kwargs):\n # Attributes with defaults\n self.return_dict = kwargs.pop(\"return_dict\", True)\n self.output_hidden_states = kwargs.pop(\"output_hidden_states\", False)\n self.output_attentions = kwargs.pop(\"output_attentions\", False)\n self.torchscript = kwargs.pop(\"torchscript\", False) # Only used by PyTorch models\n self.torch_dtype = kwargs.pop(\"torch_dtype\", None) # Only used by PyTorch models\n self.use_bfloat16 = kwargs.pop(\"use_bfloat16\", False)\n self.tf_legacy_loss = kwargs.pop(\"tf_legacy_loss\", False) # Only used by TensorFlow models\n self.pruned_heads = kwargs.pop(\"pruned_heads\", {})\n self.tie_word_embeddings = kwargs.pop(\n \"tie_word_embeddings\", True\n ) # Whether input and output word embeddings should be tied for all MLM, LM and Seq2Seq models.\n\n # Is decoder is used in encoder-decoder models to differentiate encoder from decoder\n self.is_encoder_decoder = kwargs.pop(\"is_encoder_decoder\", False)\n self.is_decoder = kwargs.pop(\"is_decoder\", False)\n self.cross_attention_hidden_size = kwargs.pop(\"cross_attention_hidden_size\", None)\n self.add_cross_attention = kwargs.pop(\"add_cross_attention\", False)\n self.tie_encoder_decoder = kwargs.pop(\"tie_encoder_decoder\", False)\n\n # Parameters for sequence generation\n self.max_length = kwargs.pop(\"max_length\", 20)\n self.min_length = kwargs.pop(\"min_length\", 0)\n self.do_sample = kwargs.pop(\"do_sample\", False)\n self.early_stopping = kwargs.pop(\"early_stopping\", False)\n self.num_beams = kwargs.pop(\"num_beams\", 1)\n self.num_beam_groups = kwargs.pop(\"num_beam_groups\", 1)\n self.diversity_penalty = kwargs.pop(\"diversity_penalty\", 0.0)\n self.temperature = kwargs.pop(\"temperature\", 1.0)\n self.top_k = kwargs.pop(\"top_k\", 50)\n self.top_p = kwargs.pop(\"top_p\", 1.0)\n self.typical_p = kwargs.pop(\"typical_p\", 1.0)\n self.repetition_penalty = kwargs.pop(\"repetition_penalty\", 1.0)\n self.length_penalty = kwargs.pop(\"length_penalty\", 1.0)\n self.no_repeat_ngram_size = kwargs.pop(\"no_repeat_ngram_size\", 0)\n self.encoder_no_repeat_ngram_size = kwargs.pop(\"encoder_no_repeat_ngram_size\", 0)\n self.bad_words_ids = kwargs.pop(\"bad_words_ids\", None)\n self.num_return_sequences = kwargs.pop(\"num_return_sequences\", 1)\n self.chunk_size_feed_forward = kwargs.pop(\"chunk_size_feed_forward\", 0)\n self.output_scores = kwargs.pop(\"output_scores\", False)\n self.return_dict_in_generate = kwargs.pop(\"return_dict_in_generate\", False)\n self.forced_bos_token_id = kwargs.pop(\"forced_bos_token_id\", None)\n self.forced_eos_token_id = kwargs.pop(\"forced_eos_token_id\", None)\n self.remove_invalid_values = kwargs.pop(\"remove_invalid_values\", False)\n self.exponential_decay_length_penalty = kwargs.pop(\"exponential_decay_length_penalty\", None)\n\n # Fine-tuning task arguments\n self.architectures = kwargs.pop(\"architectures\", None)\n 
self.finetuning_task = kwargs.pop(\"finetuning_task\", None)\n self.id2label = kwargs.pop(\"id2label\", None)\n self.label2id = kwargs.pop(\"label2id\", None)\n if self.id2label is not None:\n num_labels = kwargs.pop(\"num_labels\", None)\n if num_labels is not None and len(self.id2label) != num_labels:\n logger.warning(\n f\"You passed along `num_labels={num_labels}` with an incompatible id to label map: \"\n f\"{self.id2label}. The number of labels wil be overwritten to {self.num_labels}.\"\n )\n self.id2label = dict((int(key), value) for key, value in self.id2label.items())\n # Keys are always strings in JSON so convert ids to int here.\n else:\n self.num_labels = kwargs.pop(\"num_labels\", 2)\n\n if self.torch_dtype is not None and isinstance(self.torch_dtype, str):\n # we will start using self.torch_dtype in v5, but to be consistent with\n # from_pretrained's torch_dtype arg convert it to an actual torch.dtype object\n if is_torch_available():\n import torch\n\n self.torch_dtype = getattr(torch, self.torch_dtype)\n\n # Tokenizer arguments TODO: eventually tokenizer and models should share the same config\n self.tokenizer_class = kwargs.pop(\"tokenizer_class\", None)\n self.prefix = kwargs.pop(\"prefix\", None)\n self.bos_token_id = kwargs.pop(\"bos_token_id\", None)\n self.pad_token_id = kwargs.pop(\"pad_token_id\", None)\n self.eos_token_id = kwargs.pop(\"eos_token_id\", None)\n self.sep_token_id = kwargs.pop(\"sep_token_id\", None)\n\n self.decoder_start_token_id = kwargs.pop(\"decoder_start_token_id\", None)\n\n # task specific arguments\n self.task_specific_params = kwargs.pop(\"task_specific_params\", None)\n\n # regression / multi-label classification\n self.problem_type = kwargs.pop(\"problem_type\", None)\n allowed_problem_types = (\"regression\", \"single_label_classification\", \"multi_label_classification\")\n if self.problem_type is not None and self.problem_type not in allowed_problem_types:\n raise ValueError(\n f\"The config parameter `problem_type` was not understood: received {self.problem_type} \"\n \"but only 'regression', 'single_label_classification' and 'multi_label_classification' are valid.\"\n )\n\n # TPU arguments\n if kwargs.pop(\"xla_device\", None) is not None:\n logger.warning(\n \"The `xla_device` argument has been deprecated in v4.4.0 of Transformers. It is ignored and you can \"\n \"safely remove it from your `config.json` file.\"\n )\n\n # Name or path to the pretrained checkpoint\n self._name_or_path = str(kwargs.pop(\"name_or_path\", \"\"))\n\n # Drop the transformers version info\n self.transformers_version = kwargs.pop(\"transformers_version\", None)\n\n # Deal with gradient checkpointing\n if kwargs.get(\"gradient_checkpointing\", False):\n warnings.warn(\n \"Passing `gradient_checkpointing` to a config initialization is deprecated and will be removed in v5 \"\n \"Transformers. 
Using `model.gradient_checkpointing_enable()` instead, or if you are using the \"\n \"`Trainer` API, pass `gradient_checkpointing=True` in your `TrainingArguments`.\"\n )\n\n # Additional attributes without default values\n for key, value in kwargs.items():\n try:\n setattr(self, key, value)\n except AttributeError as err:\n logger.error(f\"Can't set {key} with value {value} for {self}\")\n raise err\n\n @property\n def name_or_path(self) -> str:\n return getattr(self, \"_name_or_path\", None)\n\n @name_or_path.setter\n def name_or_path(self, value):\n self._name_or_path = str(value) # Make sure that name_or_path is a string (for JSON encoding)\n\n @property\n def use_return_dict(self) -> bool:\n \"\"\"\n `bool`: Whether or not return [`~utils.ModelOutput`] instead of tuples.\n \"\"\"\n # If torchscript is set, force `return_dict=False` to avoid jit errors\n return self.return_dict and not self.torchscript\n\n @property\n def num_labels(self) -> int:\n \"\"\"\n `int`: The number of labels for classification models.\n \"\"\"\n return len(self.id2label)\n\n @num_labels.setter\n def num_labels(self, num_labels: int):\n if not hasattr(self, \"id2label\") or self.id2label is None or len(self.id2label) != num_labels:\n self.id2label = {i: f\"LABEL_{i}\" for i in range(num_labels)}\n self.label2id = dict(zip(self.id2label.values(), self.id2label.keys()))\n\n def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):\n \"\"\"\n Save a configuration object to the directory `save_directory`, so that it can be re-loaded using the\n [`~PretrainedConfig.from_pretrained`] class method.\n\n Args:\n save_directory (`str` or `os.PathLike`):\n Directory where the configuration JSON file will be saved (will be created if it does not exist).\n push_to_hub (`bool`, *optional*, defaults to `False`):\n Whether or not to push your model to the Hugging Face model hub after saving it.\n\n <Tip warning={true}>\n\n Using `push_to_hub=True` will synchronize the repository you are pushing to with `save_directory`,\n which requires `save_directory` to be a local clone of the repo you are pushing to if it's an existing\n folder. 
Pass along `temp_dir=True` to use a temporary directory instead.\n\n </Tip>\n\n kwargs:\n Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.\n \"\"\"\n if os.path.isfile(save_directory):\n raise AssertionError(f\"Provided path ({save_directory}) should be a directory, not a file\")\n\n if push_to_hub:\n commit_message = kwargs.pop(\"commit_message\", None)\n repo = self._create_or_get_repo(save_directory, **kwargs)\n\n os.makedirs(save_directory, exist_ok=True)\n\n # If we have a custom config, we copy the file defining it in the folder and set the attributes so it can be\n # loaded from the Hub.\n if self._auto_class is not None:\n custom_object_save(self, save_directory, config=self)\n\n # If we save using the predefined names, we can load using `from_pretrained`\n output_config_file = os.path.join(save_directory, CONFIG_NAME)\n\n self.to_json_file(output_config_file, use_diff=True)\n logger.info(f\"Configuration saved in {output_config_file}\")\n\n if push_to_hub:\n url = self._push_to_hub(repo, commit_message=commit_message)\n logger.info(f\"Configuration pushed to the hub in this commit: {url}\")\n\n @classmethod\n def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> \"PretrainedConfig\":\n r\"\"\"\n Instantiate a [`PretrainedConfig`] (or a derived class) from a pretrained model configuration.\n\n Args:\n pretrained_model_name_or_path (`str` or `os.PathLike`):\n This can be either:\n\n - a string, the *model id* of a pretrained model configuration hosted inside a model repo on\n huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or\n namespaced under a user or organization name, like `dbmdz/bert-base-german-cased`.\n - a path to a *directory* containing a configuration file saved using the\n [`~PretrainedConfig.save_pretrained`] method, e.g., `./my_model_directory/`.\n - a path or url to a saved configuration JSON *file*, e.g., `./my_model_directory/configuration.json`.\n cache_dir (`str` or `os.PathLike`, *optional*):\n Path to a directory in which a downloaded pretrained model configuration should be cached if the\n standard cache should not be used.\n force_download (`bool`, *optional*, defaults to `False`):\n Whether or not to force to (re-)download the configuration files and override the cached versions if\n they exist.\n resume_download (`bool`, *optional*, defaults to `False`):\n Whether or not to delete incompletely received file. Attempts to resume the download if such a file\n exists.\n proxies (`Dict[str, str]`, *optional*):\n A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',\n 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.\n use_auth_token (`str` or *bool*, *optional*):\n The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated\n when running `transformers-cli login` (stored in `~/.huggingface`).\n revision (`str`, *optional*, defaults to `\"main\"`):\n The specific model version to use. 
It can be a branch name, a tag name, or a commit id, since we use a\n git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any\n identifier allowed by git.\n return_unused_kwargs (`bool`, *optional*, defaults to `False`):\n If `False`, then this function returns just the final configuration object.\n\n If `True`, then this functions returns a `Tuple(config, unused_kwargs)` where *unused_kwargs* is a\n dictionary consisting of the key/value pairs whose keys are not configuration attributes: i.e., the\n part of `kwargs` which has not been used to update `config` and is otherwise ignored.\n subfolder (`str`, *optional*, defaults to `\"\"`):\n In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can\n specify the folder name here.\n kwargs (`Dict[str, Any]`, *optional*):\n The values in kwargs of any keys which are configuration attributes will be used to override the loaded\n values. Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled\n by the `return_unused_kwargs` keyword parameter.\n\n <Tip>\n\n Passing `use_auth_token=True` is required when you want to use a private model.\n\n </Tip>\n\n Returns:\n [`PretrainedConfig`]: The configuration object instantiated from this pretrained model.\n\n Examples:\n\n ```python\n # We can't instantiate directly the base class *PretrainedConfig* so let's show the examples on a\n # derived class: BertConfig\n config = BertConfig.from_pretrained(\n \"bert-base-uncased\"\n ) # Download configuration from huggingface.co and cache.\n config = BertConfig.from_pretrained(\n \"./test/saved_model/\"\n ) # E.g. config (or model) was saved using *save_pretrained('./test/saved_model/')*\n config = BertConfig.from_pretrained(\"./test/saved_model/my_configuration.json\")\n config = BertConfig.from_pretrained(\"bert-base-uncased\", output_attentions=True, foo=False)\n assert config.output_attentions == True\n config, unused_kwargs = BertConfig.from_pretrained(\n \"bert-base-uncased\", output_attentions=True, foo=False, return_unused_kwargs=True\n )\n assert config.output_attentions == True\n assert unused_kwargs == {\"foo\": False}\n ```\"\"\"\n config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)\n if \"model_type\" in config_dict and hasattr(cls, \"model_type\") and config_dict[\"model_type\"] != cls.model_type:\n logger.warning(\n f\"You are using a model of type {config_dict['model_type']} to instantiate a model of type \"\n f\"{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.\"\n )\n\n return cls.from_dict(config_dict, **kwargs)\n\n @classmethod\n def get_config_dict(\n cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs\n ) -> Tuple[Dict[str, Any], Dict[str, Any]]:\n \"\"\"\n From a `pretrained_model_name_or_path`, resolve to a dictionary of parameters, to be used for instantiating a\n [`PretrainedConfig`] using `from_dict`.\n\n Parameters:\n pretrained_model_name_or_path (`str` or `os.PathLike`):\n The identifier of the pre-trained checkpoint from which we want the dictionary of parameters.\n\n Returns:\n `Tuple[Dict, Dict]`: The dictionary(ies) that will be used to instantiate the configuration object.\n\n \"\"\"\n original_kwargs = copy.deepcopy(kwargs)\n # Get config dict associated with the base config file\n config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)\n\n # That config file may point us toward another config file to use.\n if \"configuration_files\" in config_dict:\n configuration_file = get_configuration_file(config_dict[\"configuration_files\"])\n config_dict, kwargs = cls._get_config_dict(\n pretrained_model_name_or_path, _configuration_file=configuration_file, **original_kwargs\n )\n\n return config_dict, kwargs\n\n @classmethod\n def _get_config_dict(\n cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs\n ) -> Tuple[Dict[str, Any], Dict[str, Any]]:\n cache_dir = kwargs.pop(\"cache_dir\", None)\n force_download = kwargs.pop(\"force_download\", False)\n resume_download = kwargs.pop(\"resume_download\", False)\n proxies = kwargs.pop(\"proxies\", None)\n use_auth_token = kwargs.pop(\"use_auth_token\", None)\n local_files_only = kwargs.pop(\"local_files_only\", False)\n revision = kwargs.pop(\"revision\", None)\n trust_remote_code = kwargs.pop(\"trust_remote_code\", None)\n subfolder = kwargs.pop(\"subfolder\", \"\")\n from_pipeline = kwargs.pop(\"_from_pipeline\", None)\n from_auto_class = kwargs.pop(\"_from_auto\", False)\n\n if trust_remote_code is True:\n logger.warning(\n \"The argument `trust_remote_code` is to be used with Auto classes. 
It has no effect here and is\"\n \" ignored.\"\n )\n\n user_agent = {\"file_type\": \"config\", \"from_auto_class\": from_auto_class}\n if from_pipeline is not None:\n user_agent[\"using_pipeline\"] = from_pipeline\n\n if is_offline_mode() and not local_files_only:\n logger.info(\"Offline mode: forcing local_files_only=True\")\n local_files_only = True\n\n pretrained_model_name_or_path = str(pretrained_model_name_or_path)\n if os.path.isfile(os.path.join(subfolder, pretrained_model_name_or_path)) or is_remote_url(\n pretrained_model_name_or_path\n ):\n config_file = pretrained_model_name_or_path\n else:\n configuration_file = kwargs.pop(\"_configuration_file\", CONFIG_NAME)\n\n if os.path.isdir(os.path.join(pretrained_model_name_or_path, subfolder)):\n config_file = os.path.join(pretrained_model_name_or_path, subfolder, configuration_file)\n else:\n config_file = hf_bucket_url(\n pretrained_model_name_or_path,\n filename=configuration_file,\n revision=revision,\n subfolder=subfolder if len(subfolder) > 0 else None,\n mirror=None,\n )\n\n try:\n # Load from URL or cache if already cached\n resolved_config_file = cached_path(\n config_file,\n cache_dir=cache_dir,\n force_download=force_download,\n proxies=proxies,\n resume_download=resume_download,\n local_files_only=local_files_only,\n use_auth_token=use_auth_token,\n user_agent=user_agent,\n )\n\n except RepositoryNotFoundError:\n raise EnvironmentError(\n f\"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier listed on \"\n \"'https://huggingface.co/models'\\nIf this is a private repository, make sure to pass a token having \"\n \"permission to this repo with `use_auth_token` or log in with `huggingface-cli login` and pass \"\n \"`use_auth_token=True`.\"\n )\n except RevisionNotFoundError:\n raise EnvironmentError(\n f\"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for this \"\n f\"model name. Check the model page at 'https://huggingface.co/{pretrained_model_name_or_path}' for \"\n \"available revisions.\"\n )\n except EntryNotFoundError:\n raise EnvironmentError(\n f\"{pretrained_model_name_or_path} does not appear to have a file named {configuration_file}.\"\n )\n except HTTPError as err:\n raise EnvironmentError(\n f\"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\\n{err}\"\n )\n except ValueError:\n raise EnvironmentError(\n f\"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it in\"\n f\" the cached files and it looks like {pretrained_model_name_or_path} is not the path to a directory\"\n f\" containing a {configuration_file} file.\\nCheckout your internet connection or see how to run the\"\n \" library in offline mode at 'https://huggingface.co/docs/transformers/installation#offline-mode'.\"\n )\n except EnvironmentError:\n raise EnvironmentError(\n f\"Can't load config for '{pretrained_model_name_or_path}'. If you were trying to load it from \"\n \"'https://huggingface.co/models', make sure you don't have a local directory with the same name. 
\"\n f\"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory \"\n f\"containing a {configuration_file} file\"\n )\n\n try:\n # Load config dict\n config_dict = cls._dict_from_json_file(resolved_config_file)\n except (json.JSONDecodeError, UnicodeDecodeError):\n raise EnvironmentError(\n f\"It looks like the config file at '{resolved_config_file}' is not a valid JSON file.\"\n )\n\n if resolved_config_file == config_file:\n logger.info(f\"loading configuration file {config_file}\")\n else:\n logger.info(f\"loading configuration file {config_file} from cache at {resolved_config_file}\")\n\n return config_dict, kwargs\n\n @classmethod\n def from_dict(cls, config_dict: Dict[str, Any], **kwargs) -> \"PretrainedConfig\":\n \"\"\"\n Instantiates a [`PretrainedConfig`] from a Python dictionary of parameters.\n\n Args:\n config_dict (`Dict[str, Any]`):\n Dictionary that will be used to instantiate the configuration object. Such a dictionary can be\n retrieved from a pretrained checkpoint by leveraging the [`~PretrainedConfig.get_config_dict`] method.\n kwargs (`Dict[str, Any]`):\n Additional parameters from which to initialize the configuration object.\n\n Returns:\n [`PretrainedConfig`]: The configuration object instantiated from those parameters.\n \"\"\"\n return_unused_kwargs = kwargs.pop(\"return_unused_kwargs\", False)\n # Those arguments may be passed along for our internal telemetry.\n # We remove them so they don't appear in `return_unused_kwargs`.\n kwargs.pop(\"_from_auto\", None)\n kwargs.pop(\"_from_pipeline\", None)\n\n config = cls(**config_dict)\n\n if hasattr(config, \"pruned_heads\"):\n config.pruned_heads = dict((int(key), value) for key, value in config.pruned_heads.items())\n\n # Update config with kwargs if needed\n if \"num_labels\" in kwargs and \"id2label\" in kwargs:\n num_labels = kwargs[\"num_labels\"]\n id2label = kwargs[\"id2label\"] if kwargs[\"id2label\"] is not None else []\n if len(id2label) != num_labels:\n raise ValueError(\n f\"You passed along `num_labels={num_labels }` with an incompatible id to label map: \"\n f\"{kwargs['id2label']}. 
Since those arguments are inconsistent with each other, you should remove \"\n \"one of them.\"\n )\n to_remove = []\n for key, value in kwargs.items():\n if hasattr(config, key):\n setattr(config, key, value)\n if key != \"torch_dtype\":\n to_remove.append(key)\n for key in to_remove:\n kwargs.pop(key, None)\n\n logger.info(f\"Model config {config}\")\n if return_unused_kwargs:\n return config, kwargs\n else:\n return config\n\n @classmethod\n def from_json_file(cls, json_file: Union[str, os.PathLike]) -> \"PretrainedConfig\":\n \"\"\"\n Instantiates a [`PretrainedConfig`] from the path to a JSON file of parameters.\n\n Args:\n json_file (`str` or `os.PathLike`):\n Path to the JSON file containing the parameters.\n\n Returns:\n [`PretrainedConfig`]: The configuration object instantiated from that JSON file.\n\n \"\"\"\n config_dict = cls._dict_from_json_file(json_file)\n return cls(**config_dict)\n\n @classmethod\n def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]):\n with open(json_file, \"r\", encoding=\"utf-8\") as reader:\n text = reader.read()\n return json.loads(text)\n\n def __eq__(self, other):\n return self.__dict__ == other.__dict__\n\n def __repr__(self):\n return f\"{self.__class__.__name__} {self.to_json_string()}\"\n\n def to_diff_dict(self) -> Dict[str, Any]:\n \"\"\"\n Removes all attributes from config which correspond to the default config attributes for better readability and\n serializes to a Python dictionary.\n\n Returns:\n `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance,\n \"\"\"\n config_dict = self.to_dict()\n\n # get the default config dict\n default_config_dict = PretrainedConfig().to_dict()\n\n # get class specific config dict\n class_config_dict = self.__class__().to_dict() if not self.is_composition else {}\n\n serializable_config_dict = {}\n\n # only serialize values that differ from the default config\n for key, value in config_dict.items():\n if (\n key not in default_config_dict\n or key == \"transformers_version\"\n or value != default_config_dict[key]\n or (key in class_config_dict and value != class_config_dict[key])\n ):\n serializable_config_dict[key] = value\n\n self.dict_torch_dtype_to_str(serializable_config_dict)\n\n return serializable_config_dict\n\n def adapters_to_dict(self, output):\n # Adapter-specific changes\n if hasattr(self, \"adapters\") and not isinstance(output[\"adapters\"], dict):\n output[\"adapters\"] = self.adapters.to_dict()\n if \"custom_heads\" in output:\n del output[\"custom_heads\"]\n if \"is_adaptable\" in output:\n del output[\"is_adaptable\"]\n return output\n\n def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes this instance to a Python dictionary.\n\n Returns:\n `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.\n \"\"\"\n output = copy.deepcopy(self.__dict__)\n if hasattr(self.__class__, \"model_type\"):\n output[\"model_type\"] = self.__class__.model_type\n if \"_auto_class\" in output:\n del output[\"_auto_class\"]\n\n # Transformers version when serializing the model\n output[\"transformers_version\"] = __version__\n\n self.dict_torch_dtype_to_str(output)\n\n self.adapters_to_dict(output)\n\n return output\n\n def to_json_string(self, use_diff: bool = True) -> str:\n \"\"\"\n Serializes this instance to a JSON string.\n\n Args:\n use_diff (`bool`, *optional*, defaults to `True`):\n If set to `True`, only the difference between the config instance and the default `PretrainedConfig()`\n is serialized to JSON 
string.\n\n Returns:\n `str`: String containing all the attributes that make up this configuration instance in JSON format.\n \"\"\"\n if use_diff is True:\n config_dict = self.to_diff_dict()\n else:\n config_dict = self.to_dict()\n return json.dumps(config_dict, indent=2, sort_keys=True) + \"\\n\"\n\n def to_json_file(self, json_file_path: Union[str, os.PathLike], use_diff: bool = True):\n \"\"\"\n Save this instance to a JSON file.\n\n Args:\n json_file_path (`str` or `os.PathLike`):\n Path to the JSON file in which this configuration instance's parameters will be saved.\n use_diff (`bool`, *optional*, defaults to `True`):\n If set to `True`, only the difference between the config instance and the default `PretrainedConfig()`\n is serialized to JSON file.\n \"\"\"\n with open(json_file_path, \"w\", encoding=\"utf-8\") as writer:\n writer.write(self.to_json_string(use_diff=use_diff))\n\n def update(self, config_dict: Dict[str, Any]):\n \"\"\"\n Updates attributes of this class with attributes from `config_dict`.\n\n Args:\n config_dict (`Dict[str, Any]`): Dictionary of attributes that should be updated for this class.\n \"\"\"\n for key, value in config_dict.items():\n setattr(self, key, value)\n\n def update_from_string(self, update_str: str):\n \"\"\"\n Updates attributes of this class with attributes from `update_str`.\n\n The expected format is ints, floats and strings as is, and for booleans use `true` or `false`. For example:\n \"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index\"\n\n The keys to change have to already exist in the config object.\n\n Args:\n update_str (`str`): String with attributes that should be updated for this class.\n\n \"\"\"\n\n d = dict(x.split(\"=\") for x in update_str.split(\",\"))\n for k, v in d.items():\n if not hasattr(self, k):\n raise ValueError(f\"key {k} isn't in the original config dict\")\n\n old_v = getattr(self, k)\n if isinstance(old_v, bool):\n if v.lower() in [\"true\", \"1\", \"y\", \"yes\"]:\n v = True\n elif v.lower() in [\"false\", \"0\", \"n\", \"no\"]:\n v = False\n else:\n raise ValueError(f\"can't derive true or false from {v} (key {k})\")\n elif isinstance(old_v, int):\n v = int(v)\n elif isinstance(old_v, float):\n v = float(v)\n elif not isinstance(old_v, str):\n raise ValueError(\n f\"You can only update int, float, bool or string values in the config, got {v} for key {k}\"\n )\n\n setattr(self, k, v)\n\n def dict_torch_dtype_to_str(self, d: Dict[str, Any]) -> None:\n \"\"\"\n Checks whether the passed dictionary and its nested dicts have a *torch_dtype* key and if it's not None,\n converts torch.dtype to a string of just the type. For example, `torch.float32` get converted into *\"float32\"*\n string, which can then be stored in the json format.\n \"\"\"\n if d.get(\"torch_dtype\", None) is not None and not isinstance(d[\"torch_dtype\"], str):\n d[\"torch_dtype\"] = str(d[\"torch_dtype\"]).split(\".\")[1]\n for value in d.values():\n if isinstance(value, dict):\n self.dict_torch_dtype_to_str(value)\n\n @classmethod\n def register_for_auto_class(cls, auto_class=\"AutoConfig\"):\n \"\"\"\n Register this class with a given auto class. 
This should only be used for custom configurations as the ones in\n the library are already mapped with `AutoConfig`.\n\n <Tip warning={true}>\n\n This API is experimental and may have some slight breaking changes in the next releases.\n\n </Tip>\n\n Args:\n auto_class (`str` or `type`, *optional*, defaults to `\"AutoConfig\"`):\n The auto class to register this new configuration with.\n \"\"\"\n if not isinstance(auto_class, str):\n auto_class = auto_class.__name__\n\n import transformers.models.auto as auto_module\n\n if not hasattr(auto_module, auto_class):\n raise ValueError(f\"{auto_class} is not a valid auto class.\")\n\n cls._auto_class = auto_class" }, { "identifier": "EncoderDecoderConfig", "path": "src/models/transformers/models/encoder_decoder/configuration_encoder_decoder.py", "snippet": "class EncoderDecoderConfig(PretrainedConfig):\n r\"\"\"\n [`EncoderDecoderConfig`] is the configuration class to store the configuration of a [`EncoderDecoderModel`]. It is\n used to instantiate an Encoder Decoder model according to the specified arguments, defining the encoder and decoder\n configs.\n\n Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\n documentation from [`PretrainedConfig`] for more information.\n\n Args:\n kwargs (*optional*):\n Dictionary of keyword arguments. Notably:\n\n - **encoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines\n the encoder config.\n - **decoder** ([`PretrainedConfig`], *optional*) -- An instance of a configuration object that defines\n the decoder config.\n\n Examples:\n\n ```python\n >>> from transformers import BertConfig, EncoderDecoderConfig, EncoderDecoderModel\n\n >>> # Initializing a BERT bert-base-uncased style configuration\n >>> config_encoder = BertConfig()\n >>> config_decoder = BertConfig()\n\n >>> config = EncoderDecoderConfig.from_encoder_decoder_configs(config_encoder, config_decoder)\n\n >>> # Initializing a Bert2Bert model from the bert-base-uncased style configurations\n >>> model = EncoderDecoderModel(config=config)\n\n >>> # Accessing the model configuration\n >>> config_encoder = model.config.encoder\n >>> config_decoder = model.config.decoder\n >>> # set decoder config to causal lm\n >>> config_decoder.is_decoder = True\n >>> config_decoder.add_cross_attention = True\n\n >>> # Saving the model, including its configuration\n >>> model.save_pretrained(\"my-model\")\n\n >>> # loading model and config from pretrained folder\n >>> encoder_decoder_config = EncoderDecoderConfig.from_pretrained(\"my-model\")\n >>> model = EncoderDecoderModel.from_pretrained(\"my-model\", config=encoder_decoder_config)\n ```\"\"\"\n model_type = \"encoder-decoder\"\n is_composition = True\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n assert (\n \"encoder\" in kwargs and \"decoder\" in kwargs\n ), \"Config has to be initialized with encoder and decoder config\"\n encoder_config = kwargs.pop(\"encoder\")\n encoder_model_type = encoder_config.pop(\"model_type\")\n decoder_config = kwargs.pop(\"decoder\")\n decoder_model_type = decoder_config.pop(\"model_type\")\n\n from ..auto.configuration_auto import AutoConfig\n\n self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)\n self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)\n self.is_encoder_decoder = True\n\n @classmethod\n def from_encoder_decoder_configs(\n cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs\n 
) -> PretrainedConfig:\n r\"\"\"\n Instantiate a [`EncoderDecoderConfig`] (or a derived class) from a pre-trained encoder model configuration and\n decoder model configuration.\n\n Returns:\n [`EncoderDecoderConfig`]: An instance of a configuration object\n \"\"\"\n logger.info(\"Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config\")\n decoder_config.is_decoder = True\n decoder_config.add_cross_attention = True\n\n return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)\n\n def to_dict(self):\n \"\"\"\n Serializes this instance to a Python dictionary. Override the default *to_dict()* from *PretrainedConfig*.\n\n Returns:\n `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance,\n \"\"\"\n output = copy.deepcopy(self.__dict__)\n output[\"encoder\"] = self.encoder.to_dict()\n output[\"decoder\"] = self.decoder.to_dict()\n output[\"model_type\"] = self.__class__.model_type\n\n self.adapters_to_dict(output)\n\n return output" }, { "identifier": "ModelAdaptersConfig", "path": "src/models/transformers/parameter-efficient-finetuning/configuration.py", "snippet": "class ModelAdaptersConfig(Collection):\n \"\"\"This class manages the setup and configuration of adapter modules in a pre-trained model.\"\"\"\n\n def __init__(self, **kwargs):\n adapters_list = kwargs.pop(\"adapters\", {})\n # this is for backwards compability: in v1.x, self.adapters values had shape (<type>, <config_name>)\n adapters_list = dict(\n map(lambda t: (t[0], t[1][1] or t[1][0] if isinstance(t[1], tuple) else t[1]), adapters_list.items())\n )\n self.adapters: Mapping[str, str] = adapters_list\n self.config_map = kwargs.pop(\"config_map\", {})\n\n self.fusions: Mapping[str, str] = kwargs.pop(\"fusions\", {})\n self.fusion_config_map = kwargs.pop(\"fusion_config_map\", {})\n\n # TODO-V2 Save this with config?\n self.active_setup: Optional[AdapterCompositionBlock] = None\n self.skip_layers = None\n\n def __contains__(self, item):\n return item in self.adapters.keys()\n\n def __iter__(self):\n return iter(self.adapters)\n\n def __len__(self):\n return len(self.adapters)\n\n def get(self, adapter_name: str) -> Optional[dict]:\n \"\"\"\n Gets the config dictionary for a given adapter.\n\n Args:\n adapter_name (str): The name of the adapter.\n\n Returns:\n Mapping: The adapter configuration.\n \"\"\"\n if adapter_name in self.adapters:\n config_name = self.adapters[adapter_name]\n if config_name in self.config_map:\n config = self.config_map.get(config_name, None)\n else:\n config = ADAPTER_CONFIG_MAP.get(config_name, None)\n if isinstance(config, str):\n config = ADAPTER_CONFIG_MAP[config]\n else:\n config = None\n return config\n\n def match(\n self,\n adapter_name: str,\n config_type: type,\n layer_idx: Optional[int] = None,\n location_key: Optional[str] = None,\n ) -> Optional[dict]:\n \"\"\"\n Tries to match the given criteria to an existing adapter. 
Return the adapter config if a match is found,\n otherwise None.\n \"\"\"\n config = self.get(adapter_name)\n if config is None:\n return None\n elif not isinstance(config, AdapterConfigBase):\n config = AdapterConfigBase.load(config)\n\n if isinstance(config, config_type):\n leave_out = config.get(\"leave_out\", [])\n if layer_idx is None or layer_idx not in leave_out:\n if location_key is None or config.get(location_key, False):\n return config\n # if we have a config union, match with all child configs\n elif isinstance(config, ConfigUnion):\n results = []\n for c in config.configs:\n if isinstance(c, config_type):\n leave_out = c.get(\"leave_out\", [])\n if layer_idx is None or layer_idx not in leave_out:\n if location_key is None or c.get(location_key, False):\n results.append(c)\n if len(results) == 1:\n return results[0]\n elif len(results) > 1:\n raise ValueError(\n \"Multiple adapter definitions conflict for adapter '{}' in layer {}. \"\n \"Please make sure there is only one adaptation block used per location and adapter.\".format(\n adapter_name, layer_idx\n )\n )\n\n return None\n\n def add(self, adapter_name: str, config: Optional[Union[str, dict]] = None):\n \"\"\"\n Adds a new adapter of the name to the model config.\n\n Args:\n adapter_name (str): The name of the adapter.\n config (Optional[Union[str, dict]], optional): The adapter config. Defaults to None.\n \"\"\"\n if adapter_name in self.adapters:\n raise ValueError(f\"An adapter with the name '{adapter_name}' has already been added.\")\n if config is None:\n config = DEFAULT_ADAPTER_CONFIG\n if isinstance(config, str):\n if config not in ADAPTER_CONFIG_MAP and config not in self.config_map:\n raise ValueError(f\"Invalid adapter config identifier '{config}'.\")\n config_name = config\n # if it's a dict, compute it's hash and add a new entry to the config map\n elif isinstance(config, Mapping):\n config_name = get_adapter_config_hash(config)\n self.config_map[config_name] = AdapterConfigBase.load(config)\n else:\n raise ValueError(\"Invalid adapter config: {}\".format(config))\n self.adapters[adapter_name] = config_name\n logger.info(f\"Adding adapter '{adapter_name}'.\")\n\n def get_fusion(self, fusion_name: Union[str, List[str]]) -> Optional[dict]:\n \"\"\"\n Gets the config dictionary for a given AdapterFusion.\n\n Args:\n fusion_name (Union[str, List[str]]): The name of the AdapterFusion or the adapters to fuse.\n\n Returns:\n Optional[dict]: The AdapterFusion configuration.\n \"\"\"\n if isinstance(fusion_name, list):\n fusion_name = \",\".join(fusion_name)\n if fusion_name in self.fusions:\n config_name = self.fusions[fusion_name]\n if config_name in self.fusion_config_map:\n config = self.fusion_config_map.get(config_name, None)\n else:\n config = ADAPTERFUSION_CONFIG_MAP.get(config_name, None)\n else:\n config = None\n return config\n\n def add_fusion(self, fusion_name: Union[str, List[str]], config: Optional[Union[str, dict]] = None):\n \"\"\"\n Adds a new AdapterFusion.\n\n Args:\n fusion_name (Union[str, List[str]]): The name of the AdapterFusion or the adapters to fuse.\n config (Optional[Union[str, dict]], optional): AdapterFusion config. 
Defaults to None.\n \"\"\"\n if isinstance(fusion_name, list):\n fusion_name = \",\".join(fusion_name)\n if fusion_name in self.fusions:\n raise ValueError(f\"An AdapterFusion with the name '{fusion_name}' has already been added.\")\n if config is None:\n config = DEFAULT_ADAPTERFUSION_CONFIG\n if isinstance(config, str):\n if config not in ADAPTERFUSION_CONFIG_MAP and config not in self.fusion_config_map:\n raise ValueError(f\"Invalid AdapterFusion config identifier '{config}'.\")\n config_name = config\n # if it's a dict, compute it's hash and add a new entry to the config map\n elif isinstance(config, Mapping):\n config_name = get_adapter_config_hash(config)\n self.fusion_config_map[config_name] = config\n else:\n raise ValueError(\"Invalid AdapterFusion config: {}\".format(config))\n self.fusions[fusion_name] = config_name\n logger.info(f\"Adding AdapterFusion '{fusion_name}'.\")\n\n def common_config_value(self, adapter_names: list, attribute: str):\n \"\"\"\n Checks whether all adapters in a list share the same config setting for a given attribute and returns the\n shared value.\n\n Args:\n adapter_names (list): The adapters to check.\n attribute (str): The config attribute to check.\n \"\"\"\n common_value = None\n for i, name in enumerate(adapter_names):\n config = self.get(name)\n if not config:\n raise ValueError(\n f\"No adapter with name '{name}' found. Make sure that an adapter with this name is loaded.\"\n )\n config_value = config.get(attribute, None)\n if i > 0 and config_value != common_value:\n raise ValueError(f\"All given adapters must define the same value for config attribute {attribute}.\")\n common_value = config_value\n return common_value\n\n def to_dict(self):\n output_dict = {}\n output_dict[\"adapters\"] = copy.deepcopy(self.adapters)\n output_dict[\"config_map\"] = {}\n for k, v in self.config_map.items():\n if isinstance(v, AdapterConfigBase):\n output_dict[\"config_map\"][k] = v.to_dict()\n else:\n output_dict[\"config_map\"][k] = copy.deepcopy(v)\n output_dict[\"fusions\"] = copy.deepcopy(self.fusions)\n output_dict[\"fusion_config_map\"] = {}\n for k, v in self.fusion_config_map.items():\n if isinstance(v, AdapterConfigBase):\n output_dict[\"fusion_config_map\"][k] = v.to_dict()\n else:\n output_dict[\"fusion_config_map\"][k] = copy.deepcopy(v)\n return output_dict" } ]
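The three context snippets above document the configuration stack (PretrainedConfig, EncoderDecoderConfig, ModelAdaptersConfig) that this record's target file builds on. As a quick orientation, here is a minimal sketch of the PretrainedConfig serialization round-trip described in the first snippet; it uses the upstream transformers package and BertConfig purely for illustration and is not code from this repository.

# Minimal sketch of the PretrainedConfig round-trip documented in the snippet above.
# Assumes the upstream `transformers` package is installed; BertConfig stands in for
# any concrete subclass, since the base class is not meant to be used directly.
from transformers import BertConfig

config = BertConfig(hidden_dropout_prob=0.2, num_labels=3)

# to_dict()/from_dict() mirror the methods shown in the snippet.
restored = BertConfig.from_dict(config.to_dict())
assert restored.hidden_dropout_prob == 0.2
assert restored.num_labels == 3

# to_json_string(use_diff=True) serializes only attributes that differ from defaults.
print(config.to_json_string(use_diff=True))

The record's own import statement, code fields, and gold next line follow.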
from ...configuration_utils import PretrainedConfig from ...models.encoder_decoder.configuration_encoder_decoder import EncoderDecoderConfig from ..configuration import ModelAdaptersConfig
14,385
CONFIG_CLASS_KEYS_MAPPING = {
    "bart": {
        "num_attention_heads": "encoder_attention_heads",
        "hidden_size": "d_model",
        "hidden_dropout_prob": "dropout",
        "attention_probs_dropout_prob": "attention_dropout",
    },
    "beit": {},
    "bert": {},
    "distilbert": {
        "hidden_dropout_prob": "dropout",
        "attention_probs_dropout_prob": "attention_dropout",
    },
    "gpt2": {
        "hidden_dropout_prob": "resid_pdrop",
        "attention_probs_dropout_prob": "attn_pdrop",
    },
    "gptj": {
        "hidden_dropout_prob": "resid_pdrop",
        "attention_probs_dropout_prob": "attn_pdrop",
    },
    "mbart": {
        "num_attention_heads": "encoder_attention_heads",
        "hidden_size": "d_model",
        "hidden_dropout_prob": "dropout",
        "attention_probs_dropout_prob": "attention_dropout",
    },
    "roberta": {},
    "t5": {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
        "hidden_dropout_prob": "dropout_rate",
        "attention_probs_dropout_prob": "dropout_rate",
    },
    "vit": {},
    "xlm_roberta": {},
}


def wrap_config(config: PretrainedConfig) -> PretrainedConfig:
    """
    Makes required changes to a model config class to allow usage with adapters.

    Args:
        config (PretrainedConfig): The config to be wrapped.

    Returns:
        PretrainedConfig: The same config object, with modifications applied.
    """
    if getattr(config, "is_adaptable", False):
        return config
    # Init ModelAdaptersConfig
    if not hasattr(config, "adapters"):
config.adapters = ModelAdaptersConfig()
2
2023-10-18 18:05:54+00:00
16k
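Before the next record starts, one illustrative note on the CONFIG_CLASS_KEYS_MAPPING table in the cropped_code field above: it maps standardized config attribute names (hidden_size, num_attention_heads, ...) to each architecture's native names so adapter code can read them uniformly. The helper below is a hypothetical, self-contained sketch of that lookup and is not part of the repository.

# Hypothetical sketch of how a per-model-type key mapping such as
# CONFIG_CLASS_KEYS_MAPPING (see the cropped_code field above) resolves a
# standardized attribute name. Architectures with an empty mapping already use
# the standardized names, so the key falls through unchanged.
EXAMPLE_KEYS_MAPPING = {
    "t5": {"hidden_size": "d_model", "num_attention_heads": "num_heads"},
    "bert": {},
}

def resolve_attribute(model_type: str, name: str) -> str:
    """Return the model-specific attribute name for a standardized key."""
    return EXAMPLE_KEYS_MAPPING.get(model_type, {}).get(name, name)

assert resolve_attribute("t5", "hidden_size") == "d_model"
assert resolve_attribute("bert", "hidden_size") == "hidden_size"

The next record (nchen909/Pass-Tuning, models_list/adapter/modeling_auto.py) follows the same field layout.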
nchen909/Pass-Tuning
models_list/adapter/modeling_auto.py
[ { "identifier": "PLBartForConditionalGeneration", "path": "models_list/adapter/modeling_plbart.py", "snippet": "class PLBartForConditionalGeneration(PLBartPreTrainedModel):\n base_model_prefix = \"model\"\n _keys_to_ignore_on_load_missing = [\n r\"final_logits_bias\",\n r\"encoder.version\",\n r\"decoder.version\",\n r\"lm_head.weight\",\n ]\n\n def __init__(self, config: PLBartConfig):\n super().__init__(config)\n self.model = PLBartModel(config)\n self.register_buffer(\"final_logits_bias\", torch.zeros((1, self.model.shared.num_embeddings)))\n self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False)\n\n self.init_weights()\n\n def get_encoder(self):\n return self.model.get_encoder()\n\n def get_decoder(self):\n return self.model.get_decoder()\n\n def resize_token_embeddings(self, new_num_tokens: int) -> nn.Embedding:\n new_embeddings = super().resize_token_embeddings(new_num_tokens)\n self._resize_final_logits_bias(new_num_tokens)\n return new_embeddings\n\n def _resize_final_logits_bias(self, new_num_tokens: int) -> None:\n old_num_tokens = self.final_logits_bias.shape[-1]\n if new_num_tokens <= old_num_tokens:\n new_bias = self.final_logits_bias[:, :new_num_tokens]\n else:\n extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)\n new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)\n self.register_buffer(\"final_logits_bias\", new_bias)\n\n def get_output_embeddings(self):\n return self.lm_head\n\n def set_output_embeddings(self, new_embeddings):\n self.lm_head = new_embeddings\n\n @add_start_docstrings_to_model_forward(PLBART_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)\n @add_end_docstrings(PLBART_GENERATION_EXAMPLE)\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.LongTensor] = None,\n decoder_input_ids: Optional[torch.LongTensor] = None,\n decoder_attention_mask: Optional[torch.Tensor] = None,\n head_mask: Optional[torch.Tensor] = None,\n decoder_head_mask: Optional[torch.LongTensor] = None,\n cross_attn_head_mask: Optional[torch.Tensor] = None,\n encoder_outputs: Optional[List[torch.FloatTensor]] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n decoder_inputs_embeds=None,\n labels: Optional[torch.Tensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n past_prompt = None,\n ) -> Union[Tuple[torch.Tensor], Seq2SeqLMOutput]:\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,\n config.vocab_size]` or -100 (see `input_ids` docstring). 
Tokens with indices set to `-100` are ignored\n (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.\n\n Returns:\n\n \"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if labels is not None:\n if decoder_input_ids is None:\n decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id)\n\n outputs = self.model(\n input_ids,\n attention_mask=attention_mask,\n decoder_input_ids=decoder_input_ids,\n encoder_outputs=encoder_outputs,\n decoder_attention_mask=decoder_attention_mask,\n head_mask=head_mask,\n decoder_head_mask=decoder_head_mask,\n cross_attn_head_mask=cross_attn_head_mask,\n past_key_values=past_key_values,\n inputs_embeds=inputs_embeds,\n decoder_inputs_embeds=decoder_inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias\n\n masked_lm_loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))\n\n if not return_dict:\n output = (lm_logits,) + outputs[1:]\n return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output\n\n return Seq2SeqLMOutput(\n loss=masked_lm_loss,\n logits=lm_logits,\n past_key_values=outputs.past_key_values,\n decoder_hidden_states=outputs.decoder_hidden_states,\n decoder_attentions=outputs.decoder_attentions,\n cross_attentions=outputs.cross_attentions,\n encoder_last_hidden_state=outputs.encoder_last_hidden_state,\n encoder_hidden_states=outputs.encoder_hidden_states,\n encoder_attentions=outputs.encoder_attentions,\n )\n\n def prepare_inputs_for_generation(\n self,\n decoder_input_ids: torch.LongTensor,\n past: Optional[List[torch.FloatTensor]] = None,\n attention_mask: Optional[torch.LongTensor] = None,\n head_mask: Optional[torch.Tensor] = None,\n decoder_head_mask: Optional[torch.Tensor] = None,\n cross_attn_head_mask: Optional[torch.Tensor] = None,\n use_cache: Optional[bool] = None,\n encoder_outputs: Optional[List[torch.FloatTensor]] = None,\n **kwargs # TODO: Check if this is needed. It is unused?\n ) -> Dict[str, Any]:\n # cut decoder_input_ids if past is used\n if past is not None:\n decoder_input_ids = decoder_input_ids[:, -1:]\n\n return {\n \"input_ids\": None, # encoder_outputs is defined. 
input_ids not needed\n \"encoder_outputs\": encoder_outputs,\n \"past_key_values\": past,\n \"decoder_input_ids\": decoder_input_ids,\n \"attention_mask\": attention_mask,\n \"head_mask\": head_mask,\n \"decoder_head_mask\": decoder_head_mask,\n \"cross_attn_head_mask\": cross_attn_head_mask,\n \"use_cache\": use_cache, # change this to avoid caching (presumably for debugging)\n }\n\n def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):\n return shift_tokens_right(labels, self.config.pad_token_id)\n\n @staticmethod\n def _reorder_cache(past, beam_idx):\n reordered_past = ()\n for layer_past in past:\n # cached cross_attention states don't have to be reordered -> they are always the same\n reordered_past += (\n tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:],\n )\n return reordered_past" }, { "identifier": "PLBartModel", "path": "models_list/adapter/modeling_plbart.py", "snippet": "class PLBartModel(PLBartPreTrainedModel):\n def __init__(self, config: PLBartConfig):\n super().__init__(config)\n\n padding_idx, vocab_size = config.pad_token_id, config.vocab_size\n self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)\n\n self.encoder = PLBartEncoder(config, self.shared)\n self.decoder = PLBartDecoder(config, self.shared)\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.shared\n\n def set_input_embeddings(self, value):\n self.shared = value\n self.encoder.embed_tokens = self.shared\n self.decoder.embed_tokens = self.shared\n\n def get_encoder(self):\n return self.encoder\n\n def get_decoder(self):\n return self.decoder\n\n @add_start_docstrings_to_model_forward(PLBART_INPUTS_DOCSTRING)\n @add_code_sample_docstrings(\n processor_class=_TOKENIZER_FOR_DOC,\n checkpoint=_CHECKPOINT_FOR_DOC,\n output_type=Seq2SeqModelOutput,\n config_class=_CONFIG_FOR_DOC,\n )\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.LongTensor] = None,\n decoder_input_ids: Optional[torch.LongTensor] = None,\n decoder_attention_mask: Optional[torch.Tensor] = None,\n head_mask: Optional[torch.Tensor] = None,\n decoder_head_mask: Optional[torch.LongTensor] = None,\n cross_attn_head_mask: Optional[torch.Tensor] = None,\n encoder_outputs: Optional[List[torch.FloatTensor]] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n decoder_inputs_embeds=None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ):\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n # different to other models, PLBart automatically creates decoder_input_ids from\n # input_ids if no decoder_input_ids are provided\n if decoder_input_ids is None and decoder_inputs_embeds is None:\n decoder_input_ids = shift_tokens_right(input_ids, self.config.pad_token_id)\n\n if encoder_outputs is None:\n encoder_outputs = self.encoder(\n input_ids=input_ids,\n attention_mask=attention_mask,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n output_attentions=output_attentions,\n 
output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True\n elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):\n encoder_outputs = BaseModelOutput(\n last_hidden_state=encoder_outputs[0],\n hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,\n attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,\n )\n\n # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)\n decoder_outputs = self.decoder(\n input_ids=decoder_input_ids,\n attention_mask=decoder_attention_mask,\n encoder_hidden_states=encoder_outputs[0],\n encoder_attention_mask=attention_mask,\n head_mask=decoder_head_mask,\n cross_attn_head_mask=cross_attn_head_mask,\n past_key_values=past_key_values,\n inputs_embeds=decoder_inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n if not return_dict:\n return decoder_outputs + encoder_outputs\n\n return Seq2SeqModelOutput(\n last_hidden_state=decoder_outputs.last_hidden_state,\n past_key_values=decoder_outputs.past_key_values,\n decoder_hidden_states=decoder_outputs.hidden_states,\n decoder_attentions=decoder_outputs.attentions,\n cross_attentions=decoder_outputs.cross_attentions,\n encoder_last_hidden_state=encoder_outputs.last_hidden_state,\n encoder_hidden_states=encoder_outputs.hidden_states,\n encoder_attentions=encoder_outputs.attentions,\n )" }, { "identifier": "T5ForConditionalGeneration", "path": "models_list/adapter/modeling_t5.py", "snippet": "class T5ForConditionalGeneration(T5PreTrainedModel):\r\n _keys_to_ignore_on_load_missing = [\r\n r\"encoder\\.embed_tokens\\.weight\",\r\n r\"decoder\\.embed_tokens\\.weight\",\r\n r\"lm_head\\.weight\",\r\n ]\r\n _keys_to_ignore_on_load_unexpected = [\r\n r\"decoder\\.block\\.0\\.layer\\.1\\.EncDecAttention\\.relative_attention_bias\\.weight\",\r\n ]\r\n\r\n def __init__(self, config):\r\n super().__init__(config)\r\n self.model_dim = config.d_model\r\n\r\n self.shared = nn.Embedding(config.vocab_size, config.d_model)\r\n\r\n encoder_config = copy.deepcopy(config)\r\n encoder_config.is_decoder = False\r\n encoder_config.use_cache = False\r\n encoder_config.is_encoder_decoder = False\r\n self.encoder = T5Stack(encoder_config, self.shared)\r\n\r\n decoder_config = copy.deepcopy(config)\r\n decoder_config.is_decoder = True\r\n decoder_config.is_encoder_decoder = False\r\n decoder_config.num_layers = config.num_decoder_layers\r\n self.decoder = T5Stack(decoder_config, self.shared)\r\n\r\n self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)\r\n\r\n self.init_weights()\r\n\r\n # Model parallel\r\n self.model_parallel = False\r\n self.device_map = None\r\n\r\n @add_start_docstrings(PARALLELIZE_DOCSTRING)\r\n def parallelize(self, device_map=None):\r\n self.device_map = (\r\n get_device_map(len(self.encoder.block), range(torch.cuda.device_count()))\r\n if device_map is None\r\n else device_map\r\n )\r\n assert_device_map(self.device_map, len(self.encoder.block))\r\n self.encoder.parallelize(self.device_map)\r\n self.decoder.parallelize(self.device_map)\r\n self.lm_head = self.lm_head.to(self.decoder.first_device)\r\n self.model_parallel = True\r\n\r\n @add_start_docstrings(DEPARALLELIZE_DOCSTRING)\r\n def deparallelize(self):\r\n self.encoder.deparallelize()\r\n self.decoder.deparallelize()\r\n self.encoder = 
self.encoder.to(\"cpu\")\r\n self.decoder = self.decoder.to(\"cpu\")\r\n self.lm_head = self.lm_head.to(\"cpu\")\r\n self.model_parallel = False\r\n self.device_map = None\r\n torch.cuda.empty_cache()\r\n\r\n def get_input_embeddings(self):\r\n return self.shared\r\n\r\n def set_input_embeddings(self, new_embeddings):\r\n self.shared = new_embeddings\r\n self.encoder.set_input_embeddings(new_embeddings)\r\n self.decoder.set_input_embeddings(new_embeddings)\r\n\r\n def set_output_embeddings(self, new_embeddings):\r\n self.lm_head = new_embeddings\r\n\r\n def get_output_embeddings(self):\r\n return self.lm_head\r\n\r\n def get_encoder(self):\r\n return self.encoder\r\n\r\n def get_decoder(self):\r\n return self.decoder\r\n\r\n @add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING)\r\n @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)\r\n def forward(\r\n self,\r\n input_ids=None,\r\n attention_mask=None,\r\n decoder_input_ids=None,\r\n decoder_attention_mask=None,\r\n head_mask=None,\r\n decoder_head_mask=None,\r\n cross_attn_head_mask=None,\r\n encoder_outputs=None,\r\n past_key_values=None,\r\n inputs_embeds=None,\r\n decoder_inputs_embeds=None,\r\n labels=None,\r\n use_cache=None,\r\n output_attentions=None,\r\n output_hidden_states=None,\r\n return_dict=None,\r\n past_prompt=None, # modified\r\n ):\r\n r\"\"\"\r\n labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):\r\n Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[-100, 0, ...,\r\n config.vocab_size - 1]`. All labels set to ``-100`` are ignored (masked), the loss is only computed for\r\n labels in ``[0, ..., config.vocab_size]``\r\n\r\n Returns:\r\n\r\n Examples::\r\n\r\n >>> from transformers import T5Tokenizer, T5ForConditionalGeneration\r\n\r\n >>> tokenizer = T5Tokenizer.from_pretrained('t5-small')\r\n >>> model = T5ForConditionalGeneration.from_pretrained('t5-small')\r\n\r\n >>> input_ids = tokenizer('The <extra_id_0> walks in <extra_id_1> park', return_tensors='pt').input_ids\r\n >>> labels = tokenizer('<extra_id_0> cute dog <extra_id_1> the <extra_id_2> </s>', return_tensors='pt').input_ids\r\n >>> outputs = model(input_ids=input_ids, labels=labels)\r\n >>> loss = outputs.loss\r\n >>> logits = outputs.logits\r\n\r\n >>> input_ids = tokenizer(\"summarize: studies have shown that owning a dog is good for you \", return_tensors=\"pt\").input_ids # Batch size 1\r\n >>> outputs = model.generate(input_ids)\r\n \"\"\"\r\n use_cache = use_cache if use_cache is not None else self.config.use_cache\r\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\r\n\r\n # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask\r\n if head_mask is not None and decoder_head_mask is None:\r\n if self.config.num_layers == self.config.num_decoder_layers:\r\n warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning)\r\n decoder_head_mask = head_mask\r\n\r\n # Encode if needed (training, first prediction pass)\r\n if encoder_outputs is None:\r\n # Convert encoder inputs in embeddings if needed\r\n encoder_outputs = self.encoder(\r\n input_ids=input_ids,\r\n attention_mask=attention_mask,\r\n inputs_embeds=inputs_embeds,\r\n head_mask=head_mask,\r\n output_attentions=output_attentions,\r\n output_hidden_states=output_hidden_states,\r\n return_dict=return_dict,\r\n past_prompt=past_prompt, # modified\r\n )\r\n elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):\r\n 
encoder_outputs = BaseModelOutput(\r\n last_hidden_state=encoder_outputs[0],\r\n hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,\r\n attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,\r\n )\r\n\r\n hidden_states = encoder_outputs[0]\r\n\r\n if self.model_parallel:\r\n torch.cuda.set_device(self.decoder.first_device)\r\n\r\n if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:\r\n # get decoder inputs from shifting lm labels to the right\r\n decoder_input_ids = self._shift_right(labels)\r\n\r\n # If decoding with past key value states, only the last tokens\r\n # should be given as an input\r\n if past_key_values is not None:\r\n assert labels is None, \"Decoder should not use cached key value states when training.\"\r\n if decoder_input_ids is not None:\r\n decoder_input_ids = decoder_input_ids[:, -1:]\r\n if decoder_inputs_embeds is not None:\r\n decoder_inputs_embeds = decoder_inputs_embeds[:, -1:]\r\n\r\n # Set device for model parallelism\r\n if self.model_parallel:\r\n torch.cuda.set_device(self.decoder.first_device)\r\n hidden_states = hidden_states.to(self.decoder.first_device)\r\n if decoder_input_ids is not None:\r\n decoder_input_ids = decoder_input_ids.to(self.decoder.first_device)\r\n if attention_mask is not None:\r\n attention_mask = attention_mask.to(self.decoder.first_device)\r\n if decoder_attention_mask is not None:\r\n decoder_attention_mask = decoder_attention_mask.to(self.decoder.first_device)\r\n\r\n # Decode\r\n decoder_outputs = self.decoder(\r\n input_ids=decoder_input_ids,\r\n attention_mask=decoder_attention_mask,\r\n inputs_embeds=decoder_inputs_embeds,\r\n past_key_values=past_key_values,\r\n encoder_hidden_states=hidden_states,\r\n encoder_attention_mask=attention_mask,\r\n head_mask=decoder_head_mask,\r\n cross_attn_head_mask=cross_attn_head_mask,\r\n use_cache=use_cache,\r\n output_attentions=output_attentions,\r\n output_hidden_states=output_hidden_states,\r\n return_dict=return_dict,\r\n past_prompt=past_prompt, # modified\r\n )\r\n\r\n sequence_output = decoder_outputs[0]\r\n\r\n # Set device for model parallelism\r\n if self.model_parallel:\r\n torch.cuda.set_device(self.encoder.first_device)\r\n self.lm_head = self.lm_head.to(self.encoder.first_device)\r\n sequence_output = sequence_output.to(self.lm_head.weight.device)\r\n\r\n if self.config.tie_word_embeddings:\r\n # Rescale output before projecting on vocab\r\n # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586\r\n sequence_output = sequence_output * (self.model_dim ** -0.5)\r\n\r\n lm_logits = self.lm_head(sequence_output)\r\n\r\n loss = None\r\n if labels is not None:\r\n loss_fct = CrossEntropyLoss(ignore_index=-100)\r\n loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))\r\n # TODO(thom): Add z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666\r\n\r\n if not return_dict:\r\n output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs\r\n return ((loss,) + output) if loss is not None else output\r\n\r\n return Seq2SeqLMOutput(\r\n loss=loss,\r\n logits=lm_logits,\r\n past_key_values=decoder_outputs.past_key_values,\r\n decoder_hidden_states=decoder_outputs.hidden_states,\r\n decoder_attentions=decoder_outputs.attentions,\r\n cross_attentions=decoder_outputs.cross_attentions,\r\n encoder_last_hidden_state=encoder_outputs.last_hidden_state,\r\n 
encoder_hidden_states=encoder_outputs.hidden_states,\r\n encoder_attentions=encoder_outputs.attentions,\r\n )\r\n\r\n def prepare_inputs_for_generation(\r\n self,\r\n input_ids,\r\n past=None,\r\n attention_mask=None,\r\n head_mask=None,\r\n decoder_head_mask=None,\r\n cross_attn_head_mask=None,\r\n use_cache=None,\r\n encoder_outputs=None,\r\n **kwargs\r\n ):\r\n\r\n # cut decoder_input_ids if past is used\r\n if past is not None:\r\n input_ids = input_ids[:, -1:]\r\n\r\n return {\r\n \"decoder_input_ids\": input_ids,\r\n \"past_key_values\": past,\r\n \"encoder_outputs\": encoder_outputs,\r\n \"attention_mask\": attention_mask,\r\n \"head_mask\": head_mask,\r\n \"decoder_head_mask\": decoder_head_mask,\r\n \"cross_attn_head_mask\": cross_attn_head_mask,\r\n \"use_cache\": use_cache,\r\n # \"past_prompt\": kwargs['past_prompt'], # modified\r\n }\r\n\r\n def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):\r\n return self._shift_right(labels)\r\n\r\n def _reorder_cache(self, past, beam_idx):\r\n # if decoder past is not included in output\r\n # speedy decoding is disabled and no need to reorder\r\n if past is None:\r\n logger.warning(\"You might want to consider setting `use_cache=True` to speed up decoding\")\r\n return past\r\n\r\n reordered_decoder_past = ()\r\n for layer_past_states in past:\r\n # get the correct batch idx from layer past batch dim\r\n # batch dim of `past` is at 2nd position\r\n reordered_layer_past_states = ()\r\n for layer_past_state in layer_past_states:\r\n # need to set correct `past` for each of the four key / value states\r\n reordered_layer_past_states = reordered_layer_past_states + (\r\n layer_past_state.index_select(0, beam_idx.to(layer_past_state.device)),\r\n )\r\n\r\n assert reordered_layer_past_states[0].shape == layer_past_states[0].shape\r\n assert len(reordered_layer_past_states) == len(layer_past_states)\r\n\r\n reordered_decoder_past = reordered_decoder_past + (reordered_layer_past_states,)\r\n return reordered_decoder_past\r" }, { "identifier": "T5Model", "path": "models_list/adapter/modeling_t5.py", "snippet": "class T5Model(T5PreTrainedModel):\r\n _keys_to_ignore_on_load_missing = [\r\n r\"encoder\\.embed_tokens\\.weight\",\r\n r\"decoder\\.embed_tokens\\.weight\",\r\n ]\r\n _keys_to_ignore_on_load_unexpected = [\r\n r\"decoder\\.block\\.0\\.layer\\.1\\.EncDecAttention\\.relative_attention_bias\\.weight\",\r\n ]\r\n\r\n def __init__(self, config: T5Config):\r\n super().__init__(config)\r\n self.shared = nn.Embedding(config.vocab_size, config.d_model)\r\n\r\n encoder_config = copy.deepcopy(config)\r\n encoder_config.is_decoder = False\r\n encoder_config.use_cache = False\r\n encoder_config.is_encoder_decoder = False\r\n self.encoder = T5Stack(encoder_config, self.shared)\r\n\r\n decoder_config = copy.deepcopy(config)\r\n decoder_config.is_decoder = True\r\n decoder_config.is_encoder_decoder = False\r\n decoder_config.num_layers = config.num_decoder_layers\r\n self.decoder = T5Stack(decoder_config, self.shared)\r\n\r\n self.init_weights()\r\n\r\n # Model parallel\r\n self.model_parallel = False\r\n self.device_map = None\r\n\r\n @add_start_docstrings(PARALLELIZE_DOCSTRING)\r\n def parallelize(self, device_map=None):\r\n self.device_map = (\r\n get_device_map(len(self.encoder.block), range(torch.cuda.device_count()))\r\n if device_map is None\r\n else device_map\r\n )\r\n assert_device_map(self.device_map, len(self.encoder.block))\r\n self.encoder.parallelize(self.device_map)\r\n self.decoder.parallelize(self.device_map)\r\n 
self.model_parallel = True\r\n\r\n @add_start_docstrings(DEPARALLELIZE_DOCSTRING)\r\n def deparallelize(self):\r\n self.encoder.deparallelize()\r\n self.decoder.deparallelize()\r\n self.encoder = self.encoder.to(\"cpu\")\r\n self.decoder = self.decoder.to(\"cpu\")\r\n self.model_parallel = False\r\n self.device_map = None\r\n torch.cuda.empty_cache()\r\n\r\n def get_input_embeddings(self):\r\n return self.shared\r\n\r\n def set_input_embeddings(self, new_embeddings):\r\n self.shared = new_embeddings\r\n self.encoder.set_input_embeddings(new_embeddings)\r\n self.decoder.set_input_embeddings(new_embeddings)\r\n\r\n def get_encoder(self):\r\n return self.encoder\r\n\r\n def get_decoder(self):\r\n return self.decoder\r\n\r\n def _prune_heads(self, heads_to_prune):\r\n \"\"\"\r\n Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base\r\n class PreTrainedModel\r\n \"\"\"\r\n for layer, heads in heads_to_prune.items():\r\n self.encoder.layer[layer].attention.prune_heads(heads)\r\n\r\n @add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING)\r\n @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)\r\n def forward(\r\n self,\r\n input_ids=None,\r\n attention_mask=None,\r\n decoder_input_ids=None,\r\n decoder_attention_mask=None,\r\n head_mask=None,\r\n decoder_head_mask=None,\r\n cross_attn_head_mask=None,\r\n encoder_outputs=None,\r\n past_key_values=None,\r\n inputs_embeds=None,\r\n decoder_inputs_embeds=None,\r\n use_cache=None,\r\n output_attentions=None,\r\n output_hidden_states=None,\r\n return_dict=None,\r\n ):\r\n r\"\"\"\r\n Returns:\r\n\r\n Example::\r\n\r\n >>> from transformers import T5Tokenizer, T5Model\r\n\r\n >>> tokenizer = T5Tokenizer.from_pretrained('t5-small')\r\n >>> model = T5Model.from_pretrained('t5-small')\r\n\r\n >>> input_ids = tokenizer(\"Studies have been shown that owning a dog is good for you\", return_tensors=\"pt\").input_ids # Batch size 1\r\n >>> decoder_input_ids = tokenizer(\"Studies show that\", return_tensors=\"pt\").input_ids # Batch size 1\r\n >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)\r\n\r\n >>> last_hidden_states = outputs.last_hidden_state\r\n \"\"\"\r\n use_cache = use_cache if use_cache is not None else self.config.use_cache\r\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\r\n\r\n # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask\r\n if head_mask is not None and decoder_head_mask is None:\r\n if self.config.num_layers == self.config.num_decoder_layers:\r\n warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning)\r\n decoder_head_mask = head_mask\r\n\r\n # Encode if needed (training, first prediction pass)\r\n if encoder_outputs is None:\r\n encoder_outputs = self.encoder(\r\n input_ids=input_ids,\r\n attention_mask=attention_mask,\r\n inputs_embeds=inputs_embeds,\r\n head_mask=head_mask,\r\n output_attentions=output_attentions,\r\n output_hidden_states=output_hidden_states,\r\n return_dict=return_dict,\r\n )\r\n elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):\r\n encoder_outputs = BaseModelOutput(\r\n last_hidden_state=encoder_outputs[0],\r\n hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,\r\n attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,\r\n )\r\n\r\n hidden_states = encoder_outputs[0]\r\n if self.model_parallel:\r\n torch.cuda.set_device(self.decoder.first_device)\r\n # Set 
device for model parallelism\r\n if self.model_parallel:\r\n torch.cuda.set_device(self.decoder.first_device)\r\n hidden_states = hidden_states.to(self.decoder.first_device)\r\n if decoder_input_ids is not None:\r\n decoder_input_ids = decoder_input_ids.to(self.decoder.first_device)\r\n if attention_mask is not None:\r\n attention_mask = attention_mask.to(self.decoder.first_device)\r\n if decoder_attention_mask is not None:\r\n decoder_attention_mask = decoder_attention_mask.to(self.decoder.first_device)\r\n\r\n # Decode\r\n decoder_outputs = self.decoder(\r\n input_ids=decoder_input_ids,\r\n attention_mask=decoder_attention_mask,\r\n inputs_embeds=decoder_inputs_embeds,\r\n past_key_values=past_key_values,\r\n encoder_hidden_states=hidden_states,\r\n encoder_attention_mask=attention_mask,\r\n head_mask=decoder_head_mask,\r\n cross_attn_head_mask=cross_attn_head_mask,\r\n use_cache=use_cache,\r\n output_attentions=output_attentions,\r\n output_hidden_states=output_hidden_states,\r\n return_dict=return_dict,\r\n )\r\n\r\n if not return_dict:\r\n return decoder_outputs + encoder_outputs\r\n\r\n return Seq2SeqModelOutput(\r\n last_hidden_state=decoder_outputs.last_hidden_state,\r\n past_key_values=decoder_outputs.past_key_values,\r\n decoder_hidden_states=decoder_outputs.hidden_states,\r\n decoder_attentions=decoder_outputs.attentions,\r\n cross_attentions=decoder_outputs.cross_attentions,\r\n encoder_last_hidden_state=encoder_outputs.last_hidden_state,\r\n encoder_hidden_states=encoder_outputs.hidden_states,\r\n encoder_attentions=encoder_outputs.attentions,\r\n )\r" } ]
import warnings from collections import OrderedDict from transformers.utils import logging from transformers.models.albert.modeling_albert import ( AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from .modeling_plbart import ( PLBartForConditionalGeneration, PLBartModel, ) from transformers.models.bart.modeling_bart import ( BartForCausalLM, BartForQuestionAnswering, BartForSequenceClassification, ) from transformers.models.bert.modeling_bert import ( BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertLMHeadModel, BertModel, ) from transformers.models.bert_generation.modeling_bert_generation import BertGenerationDecoder, BertGenerationEncoder from transformers.models.big_bird.modeling_big_bird import ( BigBirdForCausalLM, BigBirdForMaskedLM, BigBirdForMultipleChoice, BigBirdForPreTraining, BigBirdForQuestionAnswering, BigBirdForSequenceClassification, BigBirdForTokenClassification, BigBirdModel, ) from transformers.models.bigbird_pegasus.modeling_bigbird_pegasus import ( BigBirdPegasusForCausalLM, BigBirdPegasusForConditionalGeneration, BigBirdPegasusForQuestionAnswering, BigBirdPegasusForSequenceClassification, BigBirdPegasusModel, ) from transformers.models.blenderbot.modeling_blenderbot import BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel from transformers.models.blenderbot_small.modeling_blenderbot_small import ( BlenderbotSmallForCausalLM, BlenderbotSmallForConditionalGeneration, BlenderbotSmallModel, ) from transformers.models.camembert.modeling_camembert import ( CamembertForCausalLM, CamembertForMaskedLM, CamembertForMultipleChoice, CamembertForQuestionAnswering, CamembertForSequenceClassification, CamembertForTokenClassification, CamembertModel, ) from transformers.models.canine.modeling_canine import ( CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, CanineModel, ) from transformers.models.clip.modeling_clip import CLIPModel from transformers.models.convbert.modeling_convbert import ( ConvBertForMaskedLM, ConvBertForMultipleChoice, ConvBertForQuestionAnswering, ConvBertForSequenceClassification, ConvBertForTokenClassification, ConvBertModel, ) from transformers.models.ctrl.modeling_ctrl import CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel from transformers.models.deberta.modeling_deberta import ( DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, ) from transformers.models.deberta_v2.modeling_deberta_v2 import ( DebertaV2ForMaskedLM, DebertaV2ForQuestionAnswering, DebertaV2ForSequenceClassification, DebertaV2ForTokenClassification, DebertaV2Model, ) from transformers.models.deit.modeling_deit import DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTModel from transformers.models.detr.modeling_detr import DetrForObjectDetection, DetrModel from transformers.models.distilbert.modeling_distilbert import ( DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) from transformers.models.dpr.modeling_dpr import DPRQuestionEncoder from transformers.models.electra.modeling_electra import ( ElectraForMaskedLM, 
ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ) from transformers.models.encoder_decoder.modeling_encoder_decoder import EncoderDecoderModel from transformers.models.flaubert.modeling_flaubert import ( FlaubertForMultipleChoice, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.fsmt.modeling_fsmt import FSMTForConditionalGeneration, FSMTModel from transformers.models.funnel.modeling_funnel import ( FunnelBaseModel, FunnelForMaskedLM, FunnelForMultipleChoice, FunnelForPreTraining, FunnelForQuestionAnswering, FunnelForSequenceClassification, FunnelForTokenClassification, FunnelModel, ) from transformers.models.gpt2.modeling_gpt2 import GPT2ForSequenceClassification, GPT2LMHeadModel, GPT2Model from transformers.models.gpt_neo.modeling_gpt_neo import GPTNeoForCausalLM, GPTNeoForSequenceClassification, GPTNeoModel from transformers.models.hubert.modeling_hubert import HubertModel from transformers.models.ibert.modeling_ibert import ( IBertForMaskedLM, IBertForMultipleChoice, IBertForQuestionAnswering, IBertForSequenceClassification, IBertForTokenClassification, IBertModel, ) from transformers.models.layoutlm.modeling_layoutlm import ( LayoutLMForMaskedLM, LayoutLMForSequenceClassification, LayoutLMForTokenClassification, LayoutLMModel, ) from transformers.models.led.modeling_led import ( LEDForConditionalGeneration, LEDForQuestionAnswering, LEDForSequenceClassification, LEDModel, ) from transformers.models.longformer.modeling_longformer import ( LongformerForMaskedLM, LongformerForMultipleChoice, LongformerForQuestionAnswering, LongformerForSequenceClassification, LongformerForTokenClassification, LongformerModel, ) from transformers.models.luke.modeling_luke import LukeModel from transformers.models.lxmert.modeling_lxmert import LxmertForPreTraining, LxmertForQuestionAnswering, LxmertModel from transformers.models.m2m_100.modeling_m2m_100 import M2M100ForConditionalGeneration, M2M100Model from transformers.models.marian.modeling_marian import MarianForCausalLM, MarianModel, MarianMTModel from transformers.models.mbart.modeling_mbart import ( MBartForCausalLM, MBartForConditionalGeneration, MBartForQuestionAnswering, MBartForSequenceClassification, MBartModel, ) from transformers.models.megatron_bert.modeling_megatron_bert import ( MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, ) from transformers.models.mobilebert.modeling_mobilebert import ( MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertModel, ) from transformers.models.mpnet.modeling_mpnet import ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) from transformers.models.mt5.modeling_mt5 import MT5ForConditionalGeneration, MT5Model from transformers.models.openai.modeling_openai import OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel from transformers.models.pegasus.modeling_pegasus import 
PegasusForCausalLM, PegasusForConditionalGeneration, PegasusModel from transformers.models.prophetnet.modeling_prophetnet import ProphetNetForCausalLM, ProphetNetForConditionalGeneration, ProphetNetModel from transformers.models.rag.modeling_rag import ( # noqa: F401 - need to import all RagModels to be in globals() function RagModel, RagSequenceForGeneration, RagTokenForGeneration, ) from transformers.models.reformer.modeling_reformer import ( ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerModel, ReformerModelWithLMHead, ) from transformers.models.retribert.modeling_retribert import RetriBertModel from transformers.models.roberta.modeling_roberta import ( RobertaForCausalLM, RobertaForMaskedLM, RobertaForMultipleChoice, RobertaForQuestionAnswering, RobertaForSequenceClassification, RobertaForTokenClassification, RobertaModel, ) from transformers.models.roformer.modeling_roformer import ( RoFormerForCausalLM, RoFormerForMaskedLM, RoFormerForMultipleChoice, RoFormerForQuestionAnswering, RoFormerForSequenceClassification, RoFormerForTokenClassification, RoFormerModel, ) from transformers.models.speech_to_text.modeling_speech_to_text import Speech2TextForConditionalGeneration, Speech2TextModel from transformers.models.squeezebert.modeling_squeezebert import ( SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, ) from .modeling_t5 import T5ForConditionalGeneration, T5Model from transformers.models.tapas.modeling_tapas import ( TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, ) from transformers.models.transfo_xl.modeling_transfo_xl import TransfoXLForSequenceClassification, TransfoXLLMHeadModel, TransfoXLModel from transformers.models.visual_bert.modeling_visual_bert import VisualBertForPreTraining, VisualBertModel from transformers.models.vit.modeling_vit import ViTForImageClassification, ViTModel from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForMaskedLM, Wav2Vec2ForPreTraining, Wav2Vec2Model from transformers.models.xlm.modeling_xlm import ( XLMForMultipleChoice, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMWithLMHeadModel, ) from transformers.models.xlm_prophetnet.modeling_xlm_prophetnet import ( XLMProphetNetForCausalLM, XLMProphetNetForConditionalGeneration, XLMProphetNetModel, ) from transformers.models.xlm_roberta.modeling_xlm_roberta import ( XLMRobertaForCausalLM, XLMRobertaForMaskedLM, XLMRobertaForMultipleChoice, XLMRobertaForQuestionAnswering, XLMRobertaForSequenceClassification, XLMRobertaForTokenClassification, XLMRobertaModel, ) from transformers.models.xlnet.modeling_xlnet import ( XLNetForMultipleChoice, XLNetForQuestionAnsweringSimple, XLNetForSequenceClassification, XLNetForTokenClassification, XLNetLMHeadModel, XLNetModel, ) from transformers.models.auto.auto_factory import _BaseAutoModelClass, auto_class_update from transformers.models.auto.configuration_auto import ( AlbertConfig, PLBartConfig, BertConfig, BertGenerationConfig, BigBirdConfig, BigBirdPegasusConfig, BlenderbotConfig, BlenderbotSmallConfig, CamembertConfig, CanineConfig, CLIPConfig, ConvBertConfig, CTRLConfig, DebertaConfig, DebertaV2Config, DeiTConfig, DetrConfig, DistilBertConfig, DPRConfig, ElectraConfig, EncoderDecoderConfig, FlaubertConfig, FSMTConfig, FunnelConfig, GPT2Config, GPTNeoConfig, HubertConfig, IBertConfig, 
LayoutLMConfig, LEDConfig, LongformerConfig, LukeConfig, LxmertConfig, M2M100Config, MarianConfig, MBartConfig, MegatronBertConfig, MobileBertConfig, MPNetConfig, MT5Config, OpenAIGPTConfig, PegasusConfig, ProphetNetConfig, ReformerConfig, RetriBertConfig, RobertaConfig, RoFormerConfig, Speech2TextConfig, SqueezeBertConfig, T5Config, TapasConfig, TransfoXLConfig, VisualBertConfig, ViTConfig, Wav2Vec2Config, XLMConfig, XLMProphetNetConfig, XLMRobertaConfig, XLNetConfig, )
11355
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Auto Model class. """ # Add modeling imports here # # Instead of loading the BART from the transformers==4.9.1, we choose to load from our own prefix-tuning version. # Instead of loading the T5 from the transformers==4.9.1, we choose to load from our prefix-tuning version. logger = logging.get_logger(__name__) MODEL_MAPPING = OrderedDict( [ # Base model mapping (VisualBertConfig, VisualBertModel), (CanineConfig, CanineModel), (RoFormerConfig, RoFormerModel), (CLIPConfig, CLIPModel), (BigBirdPegasusConfig, BigBirdPegasusModel), (DeiTConfig, DeiTModel), (LukeConfig, LukeModel), (DetrConfig, DetrModel), (GPTNeoConfig, GPTNeoModel), (BigBirdConfig, BigBirdModel), (Speech2TextConfig, Speech2TextModel), (ViTConfig, ViTModel), (Wav2Vec2Config, Wav2Vec2Model), (HubertConfig, HubertModel), (M2M100Config, M2M100Model), (ConvBertConfig, ConvBertModel), (LEDConfig, LEDModel), (BlenderbotSmallConfig, BlenderbotSmallModel), (RetriBertConfig, RetriBertModel), (MT5Config, MT5Model), (T5Config, T5Model), (PegasusConfig, PegasusModel), (MarianConfig, MarianMTModel), (MBartConfig, MBartModel), (BlenderbotConfig, BlenderbotModel), (DistilBertConfig, DistilBertModel), (AlbertConfig, AlbertModel), (CamembertConfig, CamembertModel), (XLMRobertaConfig, XLMRobertaModel), (PLBartConfig, PLBartModel), (LongformerConfig, LongformerModel), (RobertaConfig, RobertaModel), (LayoutLMConfig, LayoutLMModel), (SqueezeBertConfig, SqueezeBertModel), (BertConfig, BertModel), (OpenAIGPTConfig, OpenAIGPTModel), (GPT2Config, GPT2Model), (MegatronBertConfig, MegatronBertModel), (MobileBertConfig, MobileBertModel), (TransfoXLConfig, TransfoXLModel), (XLNetConfig, XLNetModel), (FlaubertConfig, FlaubertModel), (FSMTConfig, FSMTModel), (XLMConfig, XLMModel), (CTRLConfig, CTRLModel), (ElectraConfig, ElectraModel), (ReformerConfig, ReformerModel), (FunnelConfig, (FunnelModel, FunnelBaseModel)), (LxmertConfig, LxmertModel), (BertGenerationConfig, BertGenerationEncoder), (DebertaConfig, DebertaModel), (DebertaV2Config, DebertaV2Model), (DPRConfig, DPRQuestionEncoder), (XLMProphetNetConfig, XLMProphetNetModel), (ProphetNetConfig, ProphetNetModel), (MPNetConfig, MPNetModel), (TapasConfig, TapasModel), (MarianConfig, MarianModel), (IBertConfig, IBertModel), ] ) MODEL_FOR_PRETRAINING_MAPPING = OrderedDict( [ # Model for pre-training mapping (VisualBertConfig, VisualBertForPreTraining), (LayoutLMConfig, LayoutLMForMaskedLM), (RetriBertConfig, RetriBertModel), (T5Config, T5ForConditionalGeneration), (DistilBertConfig, DistilBertForMaskedLM), (AlbertConfig, AlbertForPreTraining), (CamembertConfig, CamembertForMaskedLM), (XLMRobertaConfig, XLMRobertaForMaskedLM),
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Auto Model class. """ # Add modeling imports here # # Instead of loading the BART from the transformers==4.9.1, we choose to load from our own prefix-tuning version. # Instead of loading the T5 from the transformers==4.9.1, we choose to load from our prefix-tuning version. logger = logging.get_logger(__name__) MODEL_MAPPING = OrderedDict( [ # Base model mapping (VisualBertConfig, VisualBertModel), (CanineConfig, CanineModel), (RoFormerConfig, RoFormerModel), (CLIPConfig, CLIPModel), (BigBirdPegasusConfig, BigBirdPegasusModel), (DeiTConfig, DeiTModel), (LukeConfig, LukeModel), (DetrConfig, DetrModel), (GPTNeoConfig, GPTNeoModel), (BigBirdConfig, BigBirdModel), (Speech2TextConfig, Speech2TextModel), (ViTConfig, ViTModel), (Wav2Vec2Config, Wav2Vec2Model), (HubertConfig, HubertModel), (M2M100Config, M2M100Model), (ConvBertConfig, ConvBertModel), (LEDConfig, LEDModel), (BlenderbotSmallConfig, BlenderbotSmallModel), (RetriBertConfig, RetriBertModel), (MT5Config, MT5Model), (T5Config, T5Model), (PegasusConfig, PegasusModel), (MarianConfig, MarianMTModel), (MBartConfig, MBartModel), (BlenderbotConfig, BlenderbotModel), (DistilBertConfig, DistilBertModel), (AlbertConfig, AlbertModel), (CamembertConfig, CamembertModel), (XLMRobertaConfig, XLMRobertaModel), (PLBartConfig, PLBartModel), (LongformerConfig, LongformerModel), (RobertaConfig, RobertaModel), (LayoutLMConfig, LayoutLMModel), (SqueezeBertConfig, SqueezeBertModel), (BertConfig, BertModel), (OpenAIGPTConfig, OpenAIGPTModel), (GPT2Config, GPT2Model), (MegatronBertConfig, MegatronBertModel), (MobileBertConfig, MobileBertModel), (TransfoXLConfig, TransfoXLModel), (XLNetConfig, XLNetModel), (FlaubertConfig, FlaubertModel), (FSMTConfig, FSMTModel), (XLMConfig, XLMModel), (CTRLConfig, CTRLModel), (ElectraConfig, ElectraModel), (ReformerConfig, ReformerModel), (FunnelConfig, (FunnelModel, FunnelBaseModel)), (LxmertConfig, LxmertModel), (BertGenerationConfig, BertGenerationEncoder), (DebertaConfig, DebertaModel), (DebertaV2Config, DebertaV2Model), (DPRConfig, DPRQuestionEncoder), (XLMProphetNetConfig, XLMProphetNetModel), (ProphetNetConfig, ProphetNetModel), (MPNetConfig, MPNetModel), (TapasConfig, TapasModel), (MarianConfig, MarianModel), (IBertConfig, IBertModel), ] ) MODEL_FOR_PRETRAINING_MAPPING = OrderedDict( [ # Model for pre-training mapping (VisualBertConfig, VisualBertForPreTraining), (LayoutLMConfig, LayoutLMForMaskedLM), (RetriBertConfig, RetriBertModel), (T5Config, T5ForConditionalGeneration), (DistilBertConfig, DistilBertForMaskedLM), (AlbertConfig, AlbertForPreTraining), (CamembertConfig, CamembertForMaskedLM), (XLMRobertaConfig, XLMRobertaForMaskedLM),
(PLBartConfig, PLBartForConditionalGeneration),
0
2023-10-20 09:24:44+00:00
16k
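A minimal, illustrative sketch (an assumption, not something this dump prescribes) of how a row such as the one above could be consumed for next-line completion scoring: a model is prompted with the row's cropped_code (optionally together with its retrieved context snippets) and its output is compared against next_line after whitespace normalization. The field names follow the schema shown at the start of this dump; the normalize/exact_match helpers and the truncated stand-in values below are hypothetical.

# Hypothetical usage sketch for one row of this dump; not part of the dataset itself.
def normalize(line: str) -> str:
    # Collapse whitespace so purely formatting differences do not affect the match.
    return " ".join(line.split())

def exact_match(prediction: str, reference: str) -> bool:
    # Simple exact-match criterion on normalized lines (one possible metric).
    return normalize(prediction) == normalize(reference)

row = {
    # Truncated stand-ins for the fields of the record above.
    "cropped_code": "... (XLMRobertaConfig, XLMRobertaForMaskedLM),",
    "next_line": "(PLBartConfig, PLBartForConditionalGeneration),",
}

# A completion model would be prompted with row["cropped_code"]; its output is faked here.
prediction = "  (PLBartConfig, PLBartForConditionalGeneration), "
print(exact_match(prediction, row["next_line"]))  # -> True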
JoaoPedro9674/django-ledger
django_ledger/io/io_mixin.py
[ { "identifier": "settings", "path": "django_ledger/settings.py", "snippet": " DJANGO_LEDGER_GRAPHQL_SUPPORT_ENABLED = True\n DJANGO_LEDGER_GRAPHQL_SUPPORT_ENABLED = False\n DJANGO_LEDGER_PDF_SUPPORT_ENABLED = True\n DJANGO_LEDGER_PDF_SUPPORT_ENABLED = False\nDJANGO_LEDGER_USE_CLOSING_ENTRIES = getattr(settings, 'DJANGO_LEDGER_USE_CLOSING_ENTRIES', False)\nDJANGO_LEDGER_DEFAULT_CLOSING_ENTRY_CACHE_TIMEOUT = getattr(settings,\n 'DJANGO_LEDGER_DEFAULT_CLOSING_ENTRY_CACHE_TIMEOUT', 3600)\nDJANGO_LEDGER_LOGIN_URL = getattr(settings, 'DJANGO_LEDGER_LOGIN_URL', settings.LOGIN_URL)\nDJANGO_LEDGER_BILL_NUMBER_LENGTH = getattr(settings, 'DJANGO_LEDGER_BILL_NUMBER_LENGTH', 10)\nDJANGO_LEDGER_INVOICE_NUMBER_LENGTH = getattr(settings, 'DJANGO_LEDGER_INVOICE_NUMBER_LENGTH', 10)\nDJANGO_LEDGER_FORM_INPUT_CLASSES = getattr(settings, 'DJANGO_LEDGER_FORM_INPUT_CLASSES', 'input')\nDJANGO_LEDGER_CURRENCY_SYMBOL = getattr(settings, 'DJANGO_LEDGER_CURRENCY_SYMBOL', '$')\nDJANGO_LEDGER_SPACED_CURRENCY_SYMBOL = getattr(settings, 'DJANGO_LEDGER_SPACED_CURRENCY_SYMBOL', False)\nDJANGO_LEDGER_SHOW_FEEDBACK_BUTTON = getattr(settings, 'DJANGO_LEDGER_SHOW_FEEDBACK_BUTTON', False)\nDJANGO_LEDGER_FEEDBACK_EMAIL_LIST = getattr(settings, 'DJANGO_LEDGER_FEEDBACK_EMAIL_LIST', [])\nDJANGO_LEDGER_FEEDBACK_FROM_EMAIL = getattr(settings, 'DJANGO_LEDGER_FEEDBACK_FROM_EMAIL', None)\nDJANGO_LEDGER_VALIDATE_SCHEMAS_AT_RUNTIME = getattr(settings, 'DJANGO_LEDGER_VALIDATE_SCHEMAS_AT_RUNTIME', False)\nDJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE = getattr(settings, 'DJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE', Decimal('0.02'))\nDJANGO_LEDGER_TRANSACTION_CORRECTION = getattr(settings, 'DJANGO_LEDGER_TRANSACTION_CORRECTION', Decimal('0.01'))\nDJANGO_LEDGER_ACCOUNT_CODE_GENERATE = getattr(settings, 'DJANGO_LEDGER_ACCOUNT_CODE_GENERATE', True)\nDJANGO_LEDGER_ACCOUNT_CODE_GENERATE_LENGTH = getattr(settings, 'DJANGO_LEDGER_ACCOUNT_CODE_GENERATE_LENGTH', 5)\nDJANGO_LEDGER_ACCOUNT_CODE_USE_PREFIX = getattr(settings, 'DJANGO_LEDGER_ACCOUNT_CODE_GENERATE_LENGTH', True)\nDJANGO_LEDGER_JE_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_JE_NUMBER_PREFIX', 'JE')\nDJANGO_LEDGER_PO_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_PO_NUMBER_PREFIX', 'PO')\nDJANGO_LEDGER_ESTIMATE_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_ESTIMATE_NUMBER_PREFIX', 'E')\nDJANGO_LEDGER_INVOICE_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_INVOICE_NUMBER_PREFIX', 'I')\nDJANGO_LEDGER_BILL_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_BILL_NUMBER_PREFIX', 'B')\nDJANGO_LEDGER_VENDOR_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_VENDOR_NUMBER_PREFIX', 'V')\nDJANGO_LEDGER_CUSTOMER_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_CUSTOMER_NUMBER_PREFIX', 'C')\nDJANGO_LEDGER_EXPENSE_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_EXPENSE_NUMBER_PREFIX', 'IEX')\nDJANGO_LEDGER_INVENTORY_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_INVENTORY_NUMBER_PREFIX', 'INV')\nDJANGO_LEDGER_PRODUCT_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_PRODUCT_NUMBER_PREFIX', 'IPR')\nDJANGO_LEDGER_DOCUMENT_NUMBER_PADDING = getattr(settings, 'DJANGO_LEDGER_DOCUMENT_NUMBER_PADDING', 10)\nDJANGO_LEDGER_JE_NUMBER_NO_UNIT_PREFIX = getattr(settings, 'DJANGO_LEDGER_JE_NUMBER_NO_UNIT_PREFIX', '000')\nDJANGO_LEDGER_BILL_MODEL_ABSTRACT_CLASS = getattr(settings,\n 'DJANGO_LEDGER_BILL_MODEL_ABSTRACT_CLASS',\n 'django_ledger.models.bill.BillModelAbstract')\nDJANGO_LEDGER_INVOICE_MODEL_ABSTRACT_CLASS = getattr(settings,\n 'DJANGO_LEDGER_INVOICE_MODEL_ABSTRACT_CLASS',\n 
'django_ledger.models.invoice.InvoiceModelAbstract')\nDJANGO_LEDGER_DEFAULT_COA = getattr(settings, 'DJANGO_LEDGER_DEFAULT_COA', None)\nDJANGO_LEDGER_FINANCIAL_ANALYSIS = {\n 'ratios': {\n 'current_ratio': {\n 'good_incremental': True,\n 'ranges': {\n 'healthy': 2,\n 'watch': 1,\n 'warning': .5,\n 'critical': .25\n }\n },\n 'quick_ratio': {\n 'good_incremental': True,\n 'ranges': {\n 'healthy': 2,\n 'watch': 1,\n 'warning': .5,\n 'critical': .25\n }\n },\n 'debt_to_equity': {\n 'good_incremental': False,\n 'ranges': {\n 'healthy': 0,\n 'watch': .25,\n 'warning': .5,\n 'critical': 1\n }\n },\n 'return_on_equity': {\n 'good_incremental': True,\n 'ranges': {\n 'healthy': .10,\n 'watch': .07,\n 'warning': .04,\n 'critical': .02\n }\n },\n 'return_on_assets': {\n 'good_incremental': True,\n 'ranges': {\n 'healthy': .10,\n 'watch': .06,\n 'warning': .04,\n 'critical': .02\n }\n },\n 'net_profit_margin': {\n 'good_incremental': True,\n 'ranges': {\n 'healthy': .10,\n 'watch': .06,\n 'warning': .04,\n 'critical': .02\n }\n },\n 'gross_profit_margin': {\n 'good_incremental': True,\n 'ranges': {\n 'healthy': .10,\n 'watch': .06,\n 'warning': .04,\n 'critical': .02\n }\n },\n }\n}" }, { "identifier": "InvalidDateInputError", "path": "django_ledger/exceptions.py", "snippet": "class InvalidDateInputError(ValidationError):\n pass" }, { "identifier": "TransactionNotInBalanceError", "path": "django_ledger/exceptions.py", "snippet": "class TransactionNotInBalanceError(ValidationError):\n pass" }, { "identifier": "roles", "path": "django_ledger/io/roles.py", "snippet": "DEBIT = 'debit'\nCREDIT = 'credit'\nASSET_CA_CASH = 'asset_ca_cash'\nASSET_CA_MKT_SECURITIES = 'asset_ca_mkt_sec'\nASSET_CA_RECEIVABLES = 'asset_ca_recv'\nASSET_CA_INVENTORY = 'asset_ca_inv'\nASSET_CA_UNCOLLECTIBLES = 'asset_ca_uncoll'\nASSET_CA_PREPAID = 'asset_ca_prepaid'\nASSET_CA_OTHER = 'asset_ca_other'\nASSET_LTI_NOTES_RECEIVABLE = 'asset_lti_notes'\nASSET_LTI_LAND = 'asset_lti_land'\nASSET_LTI_SECURITIES = 'asset_lti_sec'\nASSET_PPE_BUILDINGS = 'asset_ppe_build'\nASSET_PPE_BUILDINGS_ACCUM_DEPRECIATION = 'asset_ppe_build_accum_depr'\nASSET_PPE_EQUIPMENT = 'asset_ppe_equip'\nASSET_PPE_EQUIPMENT_ACCUM_DEPRECIATION = 'asset_ppe_equip_accum_depr'\nASSET_PPE_PLANT = 'asset_ppe_plant'\nASSET_PPE_PLANT_ACCUM_DEPRECIATION = 'asset_ppe_plant_depr'\nASSET_INTANGIBLE_ASSETS = 'asset_ia'\nASSET_INTANGIBLE_ASSETS_ACCUM_AMORTIZATION = 'asset_ia_accum_amort'\nASSET_ADJUSTMENTS = 'asset_adjustment'\nLIABILITY_CL_ACC_PAYABLE = 'lia_cl_acc_payable'\nLIABILITY_CL_WAGES_PAYABLE = 'lia_cl_wages_payable'\nLIABILITY_CL_TAXES_PAYABLE = 'lia_cl_taxes_payable'\nLIABILITY_CL_INTEREST_PAYABLE = 'lia_cl_int_payable'\nLIABILITY_CL_ST_NOTES_PAYABLE = 'lia_cl_st_notes_payable'\nLIABILITY_CL_LTD_MATURITIES = 'lia_cl_ltd_mat'\nLIABILITY_CL_DEFERRED_REVENUE = 'lia_cl_def_rev'\nLIABILITY_CL_OTHER = 'lia_cl_other'\nLIABILITY_LTL_NOTES_PAYABLE = 'lia_ltl_notes'\nLIABILITY_LTL_BONDS_PAYABLE = 'lia_ltl_bonds'\nLIABILITY_LTL_MORTGAGE_PAYABLE = 'lia_ltl_mortgage'\nEQUITY_CAPITAL = 'eq_capital'\nEQUITY_ADJUSTMENT = 'eq_adjustment'\nEQUITY_COMMON_STOCK = 'eq_stock_common'\nEQUITY_PREFERRED_STOCK = 'eq_stock_preferred'\nEQUITY_DIVIDENDS = 'eq_dividends'\nINCOME_OPERATIONAL = 'in_operational'\nINCOME_PASSIVE = 'in_passive'\nINCOME_CAPITAL_GAIN_LOSS = 'in_gain_loss'\nINCOME_INTEREST = 'in_interest'\nINCOME_OTHER = 'in_other'\nCOGS = 'cogs_regular'\nEXPENSE_OPERATIONAL = 'ex_regular'\nEXPENSE_CAPITAL = 'ex_capital'\nEXPENSE_DEPRECIATION = 'ex_depreciation'\nEXPENSE_AMORTIZATION = 
'ex_amortization'\nEXPENSE_TAXES = 'ex_taxes'\nEXPENSE_INTEREST_ST = 'ex_interest_st'\nEXPENSE_INTEREST_LT = 'ex_interest'\nEXPENSE_OTHER = 'ex_other'\nROOT_COA = 'root_coa'\nROOT_ASSETS = 'root_assets'\nROOT_LIABILITIES = 'root_liabilities'\nROOT_CAPITAL = 'root_capital'\nROOT_INCOME = 'root_income'\nROOT_COGS = 'root_cogs'\nROOT_EXPENSES = 'root_expenses'\nROOT_GROUP = [\n ROOT_COA,\n ROOT_ASSETS,\n ROOT_LIABILITIES,\n ROOT_CAPITAL,\n ROOT_INCOME,\n ROOT_COGS,\n ROOT_EXPENSES\n]\nROOT_GROUP_LEVEL_2 = [\n ROOT_ASSETS,\n ROOT_LIABILITIES,\n ROOT_CAPITAL,\n ROOT_INCOME,\n ROOT_COGS,\n ROOT_EXPENSES\n]\nROOT_GROUP_META = {\n ROOT_COA: {\n 'code': '00000',\n 'title': 'CoA Root Node',\n 'balance_type': DEBIT\n },\n ROOT_ASSETS: {\n 'code': '01000',\n 'title': 'Asset Accounts Root Node',\n 'balance_type': DEBIT\n },\n ROOT_LIABILITIES: {\n 'code': '02000',\n 'title': 'Liability Accounts Root Node',\n 'balance_type': CREDIT\n },\n ROOT_CAPITAL: {\n 'code': '03000',\n 'title': 'Capital Accounts Root Node',\n 'balance_type': CREDIT\n },\n ROOT_INCOME: {\n 'code': '04000',\n 'title': 'Income Accounts Root Node',\n 'balance_type': CREDIT\n },\n ROOT_COGS: {\n 'code': '05000',\n 'title': 'COGS Accounts Root Node',\n 'balance_type': DEBIT\n },\n ROOT_EXPENSES: {\n 'code': '06000',\n 'title': 'Expense Accounts Root Node',\n 'balance_type': DEBIT\n },\n}\nGROUP_QUICK_ASSETS = [\n ASSET_CA_CASH,\n ASSET_CA_MKT_SECURITIES\n]\nGROUP_CURRENT_ASSETS = [\n ASSET_CA_CASH,\n ASSET_CA_MKT_SECURITIES,\n ASSET_CA_INVENTORY,\n ASSET_CA_RECEIVABLES,\n ASSET_CA_PREPAID,\n ASSET_CA_UNCOLLECTIBLES,\n ASSET_CA_OTHER\n]\nGROUP_NON_CURRENT_ASSETS = [\n ASSET_LTI_NOTES_RECEIVABLE,\n ASSET_LTI_LAND,\n ASSET_LTI_SECURITIES,\n ASSET_PPE_BUILDINGS,\n ASSET_PPE_BUILDINGS_ACCUM_DEPRECIATION,\n ASSET_PPE_EQUIPMENT,\n ASSET_PPE_EQUIPMENT_ACCUM_DEPRECIATION,\n ASSET_PPE_PLANT,\n ASSET_PPE_PLANT_ACCUM_DEPRECIATION,\n ASSET_INTANGIBLE_ASSETS,\n ASSET_INTANGIBLE_ASSETS_ACCUM_AMORTIZATION,\n ASSET_ADJUSTMENTS\n]\nGROUP_ASSETS = GROUP_CURRENT_ASSETS + GROUP_NON_CURRENT_ASSETS\nGROUP_CURRENT_LIABILITIES = [\n LIABILITY_CL_ACC_PAYABLE,\n LIABILITY_CL_DEFERRED_REVENUE,\n LIABILITY_CL_INTEREST_PAYABLE,\n LIABILITY_CL_LTD_MATURITIES,\n LIABILITY_CL_OTHER,\n LIABILITY_CL_ST_NOTES_PAYABLE,\n LIABILITY_CL_WAGES_PAYABLE,\n LIABILITY_CL_TAXES_PAYABLE\n]\nGROUP_LT_LIABILITIES = [\n LIABILITY_LTL_NOTES_PAYABLE,\n LIABILITY_LTL_BONDS_PAYABLE,\n LIABILITY_LTL_MORTGAGE_PAYABLE,\n]\nGROUP_LIABILITIES = GROUP_CURRENT_LIABILITIES + GROUP_LT_LIABILITIES\nGROUP_CAPITAL = [\n EQUITY_CAPITAL,\n EQUITY_COMMON_STOCK,\n EQUITY_PREFERRED_STOCK,\n EQUITY_DIVIDENDS,\n EQUITY_ADJUSTMENT\n]\nGROUP_INCOME = [\n INCOME_OPERATIONAL,\n INCOME_PASSIVE,\n INCOME_INTEREST,\n INCOME_CAPITAL_GAIN_LOSS,\n INCOME_OTHER\n]\nGROUP_COGS = [\n COGS\n]\nGROUP_EXPENSES = [\n EXPENSE_OPERATIONAL,\n EXPENSE_INTEREST_ST,\n EXPENSE_INTEREST_LT,\n EXPENSE_TAXES,\n EXPENSE_CAPITAL,\n EXPENSE_DEPRECIATION,\n EXPENSE_AMORTIZATION,\n EXPENSE_OTHER\n]\nGROUP_NET_PROFIT = [\n INCOME_OPERATIONAL,\n INCOME_PASSIVE,\n INCOME_INTEREST,\n INCOME_CAPITAL_GAIN_LOSS,\n INCOME_OTHER,\n COGS\n]\nGROUP_GROSS_PROFIT = [\n INCOME_OPERATIONAL,\n COGS\n]\nGROUP_NET_SALES = [\n INCOME_OPERATIONAL,\n INCOME_PASSIVE\n]\nGROUP_PPE_ACCUM_DEPRECIATION = [\n ASSET_PPE_BUILDINGS_ACCUM_DEPRECIATION,\n ASSET_PPE_EQUIPMENT_ACCUM_DEPRECIATION,\n ASSET_PPE_PLANT_ACCUM_DEPRECIATION\n]\nGROUP_EXPENSE_DEP_AND_AMT = [\n EXPENSE_DEPRECIATION,\n EXPENSE_AMORTIZATION\n]\nGROUP_EARNINGS = GROUP_INCOME + GROUP_COGS + 
GROUP_EXPENSES\nGROUP_EQUITY = GROUP_CAPITAL + GROUP_EARNINGS\nGROUP_LIABILITIES_EQUITY = GROUP_LIABILITIES + GROUP_EQUITY\nGROUP_INVOICE = [ASSET_CA_CASH, ASSET_CA_RECEIVABLES, LIABILITY_CL_DEFERRED_REVENUE]\nGROUP_BILL = [ASSET_CA_CASH, ASSET_CA_PREPAID, LIABILITY_CL_ACC_PAYABLE]\nGROUP_IC_OPERATING_REVENUES = [INCOME_OPERATIONAL]\nGROUP_IC_OPERATING_COGS = [COGS]\nGROUP_IC_OPERATING_EXPENSES = [EXPENSE_OPERATIONAL]\nGROUP_IC_OTHER_REVENUES = [\n INCOME_PASSIVE,\n INCOME_INTEREST,\n INCOME_CAPITAL_GAIN_LOSS,\n INCOME_OTHER\n]\nGROUP_IC_OTHER_EXPENSES = [\n EXPENSE_INTEREST_ST,\n EXPENSE_INTEREST_LT,\n EXPENSE_TAXES,\n EXPENSE_CAPITAL,\n EXPENSE_DEPRECIATION,\n EXPENSE_AMORTIZATION,\n EXPENSE_OTHER\n]\nGROUP_CFS_NET_INCOME = GROUP_EARNINGS\nGROUP_CFS_OP_DEPRECIATION_AMORTIZATION = [\n EXPENSE_DEPRECIATION,\n EXPENSE_AMORTIZATION\n]\nGROUP_CFS_OP_INVESTMENT_GAINS = [\n INCOME_CAPITAL_GAIN_LOSS\n]\nGROUP_CFS_OP_ACCOUNTS_RECEIVABLE = [\n ASSET_CA_RECEIVABLES\n]\nGROUP_CFS_OP_INVENTORY = [\n ASSET_CA_INVENTORY\n]\nGROUP_CFS_OP_ACCOUNTS_PAYABLE = [\n LIABILITY_CL_ACC_PAYABLE\n]\nGROUP_CFS_OP_OTHER_CURRENT_ASSETS_ADJUSTMENT = [\n ASSET_CA_PREPAID,\n ASSET_CA_UNCOLLECTIBLES,\n ASSET_CA_OTHER\n]\nGROUP_CFS_OP_OTHER_CURRENT_LIABILITIES_ADJUSTMENT = [\n LIABILITY_CL_WAGES_PAYABLE,\n LIABILITY_CL_INTEREST_PAYABLE,\n LIABILITY_CL_TAXES_PAYABLE,\n LIABILITY_CL_LTD_MATURITIES,\n LIABILITY_CL_DEFERRED_REVENUE,\n LIABILITY_CL_OTHER,\n]\nGROUP_CFS_OPERATING = list(chain.from_iterable([\n GROUP_CFS_NET_INCOME,\n GROUP_CFS_OP_DEPRECIATION_AMORTIZATION,\n GROUP_CFS_OP_INVESTMENT_GAINS,\n GROUP_CFS_OP_ACCOUNTS_RECEIVABLE,\n GROUP_CFS_OP_INVENTORY,\n GROUP_CFS_OP_ACCOUNTS_PAYABLE,\n GROUP_CFS_OP_OTHER_CURRENT_ASSETS_ADJUSTMENT,\n GROUP_CFS_OP_OTHER_CURRENT_LIABILITIES_ADJUSTMENT\n]))\nGROUP_CFS_FIN_ISSUING_EQUITY = [EQUITY_CAPITAL, EQUITY_COMMON_STOCK, EQUITY_PREFERRED_STOCK]\nGROUP_CFS_FIN_DIVIDENDS = [EQUITY_DIVIDENDS]\nGROUP_CFS_FIN_ST_DEBT_PAYMENTS = [\n LIABILITY_CL_ST_NOTES_PAYABLE,\n LIABILITY_CL_ACC_PAYABLE,\n EXPENSE_INTEREST_ST\n]\nGROUP_CFS_FIN_LT_DEBT_PAYMENTS = [\n LIABILITY_LTL_NOTES_PAYABLE,\n LIABILITY_LTL_BONDS_PAYABLE,\n LIABILITY_LTL_MORTGAGE_PAYABLE,\n EXPENSE_INTEREST_LT\n]\nGROUP_CFS_FINANCING = GROUP_CFS_FIN_ISSUING_EQUITY + GROUP_CFS_FIN_DIVIDENDS\nGROUP_CFS_INV_PURCHASE_OR_SALE_OF_PPE = [\n ASSET_PPE_BUILDINGS,\n ASSET_PPE_PLANT,\n ASSET_PPE_EQUIPMENT,\n INCOME_CAPITAL_GAIN_LOSS\n]\nGROUP_CFS_INV_LTD_OF_PPE = [\n LIABILITY_LTL_NOTES_PAYABLE,\n LIABILITY_LTL_MORTGAGE_PAYABLE,\n LIABILITY_LTL_BONDS_PAYABLE,\n]\nGROUP_CFS_INVESTING_PPE = GROUP_CFS_INV_PURCHASE_OR_SALE_OF_PPE + GROUP_CFS_INV_LTD_OF_PPE\nGROUP_CFS_INV_PURCHASE_OF_SECURITIES = [\n ASSET_CA_MKT_SECURITIES,\n ASSET_LTI_NOTES_RECEIVABLE,\n ASSET_LTI_SECURITIES,\n INCOME_INTEREST,\n INCOME_PASSIVE,\n]\nGROUP_CFS_INV_LTD_OF_SECURITIES = [\n LIABILITY_LTL_NOTES_PAYABLE,\n LIABILITY_LTL_BONDS_PAYABLE\n]\nGROUP_CFS_INVESTING_SECURITIES = GROUP_CFS_INV_PURCHASE_OF_SECURITIES + GROUP_CFS_INV_LTD_OF_SECURITIES\nGROUP_CFS_INVESTING = GROUP_CFS_INVESTING_PPE + GROUP_CFS_INVESTING_SECURITIES\nGROUP_CFS_INVESTING_AND_FINANCING = GROUP_CFS_INVESTING + GROUP_CFS_FINANCING\nBS_ASSET_ROLE = 'assets'\nBS_LIABILITIES_ROLE = 'liabilities'\nBS_EQUITY_ROLE = 'equity'\nACCOUNT_ROLE_CHOICES = [\n (BS_ASSET_ROLE.capitalize(), (\n # CURRENT ASSETS ----\n (ASSET_CA_CASH, _('Current Asset')),\n (ASSET_CA_MKT_SECURITIES, _('Marketable Securities')),\n (ASSET_CA_RECEIVABLES, _('Receivables')),\n (ASSET_CA_INVENTORY, _('Inventory')),\n 
(ASSET_CA_UNCOLLECTIBLES, _('Uncollectibles')),\n (ASSET_CA_PREPAID, _('Prepaid')),\n (ASSET_CA_OTHER, _('Other Liquid Assets')),\n\n # LONG TERM INVESTMENTS ---\n (ASSET_LTI_NOTES_RECEIVABLE, _('Notes Receivable')),\n (ASSET_LTI_LAND, _('Land')),\n (ASSET_LTI_SECURITIES, _('Securities')),\n\n # PPE ...\n (ASSET_PPE_BUILDINGS, _('Buildings')),\n (ASSET_PPE_BUILDINGS_ACCUM_DEPRECIATION, _('Buildings - Accum. Depreciation')),\n (ASSET_PPE_PLANT, _('Plant')),\n (ASSET_PPE_PLANT_ACCUM_DEPRECIATION, _('Plant - Accum. Depreciation')),\n (ASSET_PPE_EQUIPMENT, _('Equipment')),\n (ASSET_PPE_EQUIPMENT_ACCUM_DEPRECIATION, _('Equipment - Accum. Depreciation')),\n\n # Other Assets ...\n (ASSET_INTANGIBLE_ASSETS, _('Intangible Assets')),\n (ASSET_INTANGIBLE_ASSETS_ACCUM_AMORTIZATION, _('Intangible Assets - Accum. Amortization')),\n (ASSET_ADJUSTMENTS, _('Other Assets')),\n )),\n (BS_LIABILITIES_ROLE.capitalize(), (\n\n # CURRENT LIABILITIES ---\n (LIABILITY_CL_ACC_PAYABLE, _('Accounts Payable')),\n (LIABILITY_CL_WAGES_PAYABLE, _('Wages Payable')),\n (LIABILITY_CL_INTEREST_PAYABLE, _('Interest Payable')),\n (LIABILITY_CL_TAXES_PAYABLE, _('Taxes Payable')),\n (LIABILITY_CL_ST_NOTES_PAYABLE, _('Short Term Notes Payable')),\n (LIABILITY_CL_LTD_MATURITIES, _('Current Maturities of Long Tern Debt')),\n (LIABILITY_CL_DEFERRED_REVENUE, _('Deferred Revenue')),\n (LIABILITY_CL_OTHER, _('Other Liabilities')),\n\n # LONG TERM LIABILITIES ----\n (LIABILITY_LTL_NOTES_PAYABLE, _('Long Term Notes Payable')),\n (LIABILITY_LTL_BONDS_PAYABLE, _('Bonds Payable')),\n (LIABILITY_LTL_MORTGAGE_PAYABLE, _('Mortgage Payable')),\n )),\n (BS_EQUITY_ROLE.capitalize(), (\n\n # EQUITY ---\n (EQUITY_CAPITAL, _('Capital')),\n (EQUITY_COMMON_STOCK, _('Common Stock')),\n (EQUITY_PREFERRED_STOCK, _('Preferred Stock')),\n (EQUITY_ADJUSTMENT, _('Other Equity Adjustments')),\n (EQUITY_DIVIDENDS, _('Dividends & Distributions to Shareholders')),\n\n # INCOME ---\n (INCOME_OPERATIONAL, _('Operational Income')),\n (INCOME_PASSIVE, _('Investing/Passive Income')),\n (INCOME_INTEREST, _('Interest Income')),\n (INCOME_CAPITAL_GAIN_LOSS, _('Capital Gain/Loss Income')),\n (INCOME_OTHER, _('Other Income')),\n\n # COGS ----\n (COGS, _('Cost of Goods Sold')),\n\n # EXPENSES ----\n (EXPENSE_OPERATIONAL, _('Regular Expense')),\n (EXPENSE_INTEREST_ST, _('Interest Expense - Short Term Debt')),\n (EXPENSE_INTEREST_LT, _('Interest Expense - Long Term Debt')),\n (EXPENSE_TAXES, _('Tax Expense')),\n (EXPENSE_CAPITAL, _('Capital Expense')),\n (EXPENSE_DEPRECIATION, _('Depreciation Expense')),\n (EXPENSE_AMORTIZATION, _('Amortization Expense')),\n (EXPENSE_OTHER, _('Other Expense')),\n )),\n ('Root', (\n (ROOT_COA, 'CoA Root Account'),\n (ROOT_ASSETS, 'Assets Root Account'),\n (ROOT_LIABILITIES, 'Liabilities Root Account'),\n (ROOT_CAPITAL, 'Capital Root Account'),\n (ROOT_INCOME, 'Income Root Account'),\n (ROOT_COGS, 'COGS Root Account'),\n (ROOT_EXPENSES, 'Expenses Root Account'),\n ))\n]\nACCOUNT_CHOICES_NO_ROOT = [c for c in ACCOUNT_ROLE_CHOICES if c[0] != 'Root']\nROLES_ORDER_ASSETS = [a[0] for a in ACCOUNT_ROLE_CHOICES[0][1]]\nROLES_ORDER_LIABILITIES = [a[0] for a in ACCOUNT_ROLE_CHOICES[1][1]]\nROLES_ORDER_CAPITAL = [a[0] for a in ACCOUNT_ROLE_CHOICES[2][1]]\nROLES_ORDER_ALL = list(chain.from_iterable([ROLES_ORDER_ASSETS, ROLES_ORDER_LIABILITIES, ROLES_ORDER_CAPITAL]))\nACCOUNT_LIST_ROLE_ORDER = list(r[0] for r in chain.from_iterable([i[1] for i in ACCOUNT_CHOICES_NO_ROOT]))\nACCOUNT_LIST_ROLE_VERBOSE = {r[0]: r[1] for r in chain.from_iterable([i[1] for i 
in ACCOUNT_CHOICES_NO_ROOT])}\nROLE_TUPLES = sum([[(r[0].lower(), s[0]) for s in r[1]] for r in ACCOUNT_ROLE_CHOICES], list())\nROLE_DICT = dict([(t[0].lower(), [r[0] for r in t[1]]) for t in ACCOUNT_ROLE_CHOICES])\nVALID_ROLES = [r[1] for r in ROLE_TUPLES]\nBS_ROLES = dict((r[1], r[0]) for r in ROLE_TUPLES)\nBS_BUCKETS = {\n '0': 'Root',\n '1': 'Asset',\n '2': 'Liability',\n '3': 'Capital',\n '4': 'Income',\n '5': 'COGS',\n '6': 'Expenses'\n}\nBS_BUCKETS_ORDER = [v for _, v in BS_BUCKETS.items() if v != 'Root']\nROLES_VARS = locals().keys()\nROLES_DIRECTORY = dict()\nROLES_CATEGORIES = ['ASSET', 'LIABILITY', 'EQUITY', 'INCOME', 'COGS', 'EXPENSE']\nROLES_GROUPS = [g for g in ROLES_VARS if g.split('_')[0] == 'GROUP']\nGROUPS_DIRECTORY = dict()\ndef validate_roles(roles: Union[str, List[str]], raise_exception: bool = True) -> Set[str]:" }, { "identifier": "RoleContextManager", "path": "django_ledger/io/io_context.py", "snippet": "class RoleContextManager:\n\n def __init__(self,\n io_data: dict,\n by_period: bool = False,\n by_unit: bool = False):\n\n self.BY_PERIOD = by_period\n self.BY_UNIT = by_unit\n\n self.DIGEST = io_data\n self.DIGEST['role_account'] = None\n self.DIGEST['role_balance'] = None\n\n self.ACCOUNTS = io_data['accounts']\n\n self.ROLES_ACCOUNTS = dict()\n self.ROLES_BALANCES = dict()\n self.ROLES_BALANCE_SHEET = dict()\n\n if self.BY_PERIOD:\n self.ROLES_BALANCES_BY_PERIOD = defaultdict(lambda: dict())\n self.DIGEST['role_balance_by_period'] = None\n if self.BY_UNIT:\n self.ROLES_BALANCES_BY_UNIT = defaultdict(lambda: dict())\n self.DIGEST['role_balance_by_unit'] = None\n\n if self.BY_PERIOD and self.BY_UNIT:\n self.ROLES_BALANCES_BY_PERIOD_AND_UNIT = defaultdict(lambda: dict())\n\n def digest(self):\n\n self.process_roles()\n self.DIGEST['role_account'] = self.ROLES_ACCOUNTS\n self.DIGEST['role_balance'] = self.ROLES_BALANCES\n\n if self.BY_PERIOD:\n self.DIGEST['role_balance_by_period'] = self.ROLES_BALANCES_BY_PERIOD\n if self.BY_UNIT:\n self.DIGEST['role_balance_by_unit'] = self.ROLES_BALANCES_BY_UNIT\n\n return self.DIGEST\n\n def process_roles(self):\n\n for c, l in roles_module.ROLES_DIRECTORY.items():\n for r in l:\n acc_list = list(acc for acc in self.ACCOUNTS if acc['role'] == getattr(roles_module, r))\n\n self.ROLES_ACCOUNTS[r] = acc_list\n self.ROLES_BALANCES[r] = sum(acc['balance'] for acc in acc_list)\n\n if self.BY_PERIOD or self.BY_UNIT:\n for acc in acc_list:\n if self.BY_PERIOD:\n key = (acc['period_year'], acc['period_month'])\n self.ROLES_BALANCES_BY_PERIOD[key][r] = sum(acc['balance'] for acc in acc_list if all([\n acc['period_year'] == key[0],\n acc['period_month'] == key[1]]\n ))\n if self.BY_UNIT:\n key = (acc['unit_uuid'], acc['unit_name'])\n self.ROLES_BALANCES_BY_UNIT[key][r] = sum(\n acc['balance'] for acc in acc_list if acc['unit_uuid'] == key[0])" }, { "identifier": "GroupContextManager", "path": "django_ledger/io/io_context.py", "snippet": "class GroupContextManager:\n GROUP_ACCOUNTS_KEY = 'group_account'\n GROUP_BALANCE_KEY = 'group_balance'\n GROUP_BALANCE_BY_UNIT_KEY = 'group_balance_by_unit'\n GROUP_BALANCE_BY_PERIOD_KEY = 'group_balance_by_period'\n\n def __init__(self,\n io_data: dict,\n by_period: bool = False,\n by_unit: bool = False):\n\n self.BY_PERIOD = by_period\n self.BY_UNIT = by_unit\n\n self.IO_DIGEST = io_data\n\n self.IO_DIGEST[self.GROUP_ACCOUNTS_KEY] = None\n self.IO_DIGEST[self.GROUP_BALANCE_KEY] = None\n\n self.DIGEST_ACCOUNTS = io_data['accounts']\n\n self.GROUPS_ACCOUNTS = dict()\n self.GROUPS_BALANCES = dict()\n\n if 
self.BY_PERIOD:\n self.GROUPS_BALANCES_BY_PERIOD = defaultdict(lambda: dict())\n self.IO_DIGEST[self.GROUP_BALANCE_BY_PERIOD_KEY] = None\n\n if self.BY_UNIT:\n self.GROUPS_BALANCES_BY_UNIT = defaultdict(lambda: dict())\n self.IO_DIGEST[self.GROUP_BALANCE_BY_UNIT_KEY] = None\n\n if self.BY_PERIOD and self.BY_UNIT:\n self.GROUPS_BALANCES_BY_PERIOD_AND_UNIT = defaultdict(lambda: dict())\n self.IO_DIGEST[self.GROUP_BALANCE_BY_PERIOD_KEY] = None\n\n def digest(self):\n\n self.process_groups()\n self.IO_DIGEST[self.GROUP_ACCOUNTS_KEY] = self.GROUPS_ACCOUNTS\n self.IO_DIGEST[self.GROUP_BALANCE_KEY] = self.GROUPS_BALANCES\n\n if self.BY_PERIOD:\n self.IO_DIGEST[self.GROUP_BALANCE_BY_PERIOD_KEY] = self.GROUPS_BALANCES_BY_PERIOD\n if self.BY_UNIT:\n self.IO_DIGEST[self.GROUP_BALANCE_BY_UNIT_KEY] = self.GROUPS_BALANCES_BY_UNIT\n return self.IO_DIGEST\n\n def get_accounts_generator(self, mod, g):\n return (acc for acc in self.DIGEST_ACCOUNTS if acc['role'] in getattr(mod, g))\n\n def process_groups(self):\n for g in roles_module.ROLES_GROUPS:\n acc_list = list(self.get_accounts_generator(roles_module, g))\n self.GROUPS_ACCOUNTS[g] = acc_list\n self.GROUPS_BALANCES[g] = sum(acc['balance'] for acc in acc_list)\n\n if self.BY_PERIOD or self.BY_UNIT:\n for acc in acc_list:\n if self.BY_PERIOD:\n key = (acc['period_year'], acc['period_month'])\n self.GROUPS_BALANCES_BY_PERIOD[key][g] = sum(\n acc['balance'] for acc in acc_list if all([\n acc['period_year'] == key[0],\n acc['period_month'] == key[1]]\n ))\n if self.BY_UNIT:\n key = (acc['unit_uuid'], acc['unit_name'])\n self.GROUPS_BALANCES_BY_UNIT[key][g] = sum(\n acc['balance'] for acc in acc_list if acc['unit_uuid'] == key[0]\n )" }, { "identifier": "ActivityContextManager", "path": "django_ledger/io/io_context.py", "snippet": "class ActivityContextManager:\n\n def __init__(self,\n io_data: dict,\n by_unit: bool = False,\n by_period: bool = False):\n\n self.DIGEST = io_data\n self.DIGEST['activity_account'] = None\n self.DIGEST['activity_balance'] = None\n\n self.BY_PERIOD = by_period\n self.BY_UNIT = by_unit\n\n self.ACCOUNTS = io_data['accounts']\n self.ACTIVITY_ACCOUNTS = dict()\n self.ACTIVITY_BALANCES = dict()\n\n if self.BY_PERIOD:\n self.ACTIVITY_BALANCES_BY_PERIOD = defaultdict(lambda: dict())\n self.DIGEST['activity_balance_by_period'] = None\n if self.BY_UNIT:\n self.ACTIVITY_BALANCES_BY_UNIT = defaultdict(lambda: dict())\n self.DIGEST['activity_balance_by_unit'] = None\n if self.BY_PERIOD and self.BY_UNIT:\n self.ROLES_BALANCES_BY_PERIOD_AND_UNIT = defaultdict(lambda: dict())\n\n def digest(self):\n\n self.process_activity()\n self.DIGEST['activity_account'] = self.ACTIVITY_ACCOUNTS\n self.DIGEST['activity_balance'] = self.ACTIVITY_BALANCES\n\n if self.BY_PERIOD:\n self.DIGEST['activity_balance_by_period'] = self.ACTIVITY_BALANCES_BY_PERIOD\n if self.BY_UNIT:\n self.DIGEST['activity_balance_by_unit'] = self.ACTIVITY_BALANCES_BY_PERIOD\n\n def get_accounts_generator(self, activity: str):\n return (acc for acc in self.ACCOUNTS if acc['activity'] == activity)\n\n def process_activity(self):\n JournalEntryModel = lazy_importer.get_journal_entry_model()\n for act in JournalEntryModel.VALID_ACTIVITIES:\n acc_list = list(self.get_accounts_generator(act))\n self.ACTIVITY_ACCOUNTS[act] = acc_list\n self.ACTIVITY_BALANCES[act] = sum(acc['balance'] for acc in acc_list)\n\n if self.BY_PERIOD or self.BY_UNIT:\n for acc in acc_list:\n if self.BY_PERIOD:\n key = (acc['period_year'], acc['period_month'])\n self.ACTIVITY_BALANCES_BY_PERIOD[key][act] = 
sum(acc['balance'] for acc in acc_list if all([\n acc['period_year'] == key[0],\n acc['period_month'] == key[1]]\n ))\n if self.BY_UNIT:\n key = (acc['unit_uuid'], acc['unit_name'])\n self.ACTIVITY_BALANCES_BY_UNIT[key][act] = sum(\n acc['balance'] for acc in acc_list if acc['unit_uuid'] == key[0])" }, { "identifier": "BalanceSheetStatementContextManager", "path": "django_ledger/io/io_context.py", "snippet": "class BalanceSheetStatementContextManager:\n def __init__(self, io_data: dict):\n self.DIGEST = io_data\n\n def digest(self):\n if 'group_account' in self.DIGEST:\n gb_bs = {\n bsr: list(l) for bsr, l in groupby(\n chain.from_iterable(\n [\n self.DIGEST['group_account']['GROUP_ASSETS'],\n self.DIGEST['group_account']['GROUP_LIABILITIES'],\n self.DIGEST['group_account']['GROUP_CAPITAL'],\n ]\n ),\n key=lambda acc: acc['role_bs'])\n }\n\n bs_context = {\n bs_role: {\n 'total_balance': sum(a['balance'] for a in gb),\n 'is_block': True,\n 'roles': {\n r: {\n 'accounts': list(a)\n } for r, a in groupby(list(gb), key=lambda acc: acc['role'])\n }\n } for bs_role, gb in gb_bs.items()\n }\n\n for bs_role, bs_role_data in bs_context.items():\n for acc_role, role_data in bs_role_data['roles'].items():\n role_data['total_balance'] = sum(a['balance'] for a in role_data['accounts'])\n role_data['role_name'] = roles_module.ACCOUNT_LIST_ROLE_VERBOSE[acc_role]\n\n bs_context['equity_balance'] = self.DIGEST['group_balance']['GROUP_EQUITY']\n bs_context['retained_earnings_balance'] = self.DIGEST['group_balance']['GROUP_EARNINGS']\n bs_context['liabilities_equity_balance'] = self.DIGEST['group_balance']['GROUP_LIABILITIES_EQUITY']\n\n self.DIGEST['balance_sheet'] = bs_context\n\n return self.DIGEST" }, { "identifier": "IncomeStatementContextManager", "path": "django_ledger/io/io_context.py", "snippet": "class IncomeStatementContextManager:\n\n def __init__(self, io_data: dict):\n self.DIGEST = io_data\n\n def digest(self):\n if 'group_account' in self.DIGEST:\n self.DIGEST['income_statement'] = {\n 'operating': {\n 'revenues': [\n acc for acc in self.DIGEST['group_account']['GROUP_INCOME'] if\n acc['role'] in roles_module.GROUP_IC_OPERATING_REVENUES\n ],\n 'cogs': [\n acc for acc in self.DIGEST['group_account']['GROUP_COGS'] if\n acc['role'] in roles_module.GROUP_IC_OPERATING_COGS\n ],\n 'expenses': [\n acc for acc in self.DIGEST['group_account']['GROUP_EXPENSES'] if\n acc['role'] in roles_module.GROUP_IC_OPERATING_EXPENSES\n ]\n },\n 'other': {\n 'revenues': [acc for acc in self.DIGEST['group_account']['GROUP_INCOME'] if\n acc['role'] in roles_module.GROUP_IC_OTHER_REVENUES],\n 'expenses': [acc for acc in self.DIGEST['group_account']['GROUP_EXPENSES'] if\n acc['role'] in roles_module.GROUP_IC_OTHER_EXPENSES],\n }\n }\n\n for activity, ic_section in self.DIGEST['income_statement'].items():\n for section, acc_list in ic_section.items():\n for acc in acc_list:\n acc['role_name'] = roles_module.ACCOUNT_LIST_ROLE_VERBOSE[acc['role']]\n\n # OPERATING INCOME...\n self.DIGEST['income_statement']['operating']['gross_profit'] = sum(\n acc['balance'] for acc in chain.from_iterable(\n [\n self.DIGEST['income_statement']['operating']['revenues'],\n self.DIGEST['income_statement']['operating']['cogs']\n ]\n ))\n self.DIGEST['income_statement']['operating']['net_operating_income'] = sum(\n acc['balance'] for acc in chain.from_iterable(\n [\n self.DIGEST['income_statement']['operating']['revenues'],\n self.DIGEST['income_statement']['operating']['cogs'],\n self.DIGEST['income_statement']['operating']['expenses'],\n ]\n 
))\n self.DIGEST['income_statement']['operating']['net_operating_revenue'] = sum(\n acc['balance'] for acc in self.DIGEST['income_statement']['operating']['revenues']\n )\n self.DIGEST['income_statement']['operating']['net_cogs'] = sum(\n acc['balance'] for acc in self.DIGEST['income_statement']['operating']['cogs']\n )\n self.DIGEST['income_statement']['operating']['net_operating_expenses'] = sum(\n acc['balance'] for acc in self.DIGEST['income_statement']['operating']['expenses']\n )\n\n # OTHER INCOME....\n self.DIGEST['income_statement']['other']['net_other_revenues'] = sum(\n acc['balance'] for acc in self.DIGEST['income_statement']['other']['revenues']\n )\n self.DIGEST['income_statement']['other']['net_other_expenses'] = sum(\n acc['balance'] for acc in self.DIGEST['income_statement']['other']['expenses']\n )\n self.DIGEST['income_statement']['other']['net_other_income'] = sum(\n acc['balance'] for acc in chain.from_iterable(\n [\n self.DIGEST['income_statement']['other']['revenues'],\n self.DIGEST['income_statement']['other']['expenses']\n ]\n ))\n\n # NET INCOME...\n self.DIGEST['income_statement']['net_income'] = self.DIGEST['income_statement']['operating'][\n 'net_operating_income']\n self.DIGEST['income_statement']['net_income'] += self.DIGEST['income_statement']['other'][\n 'net_other_income']\n return self.DIGEST" }, { "identifier": "CashFlowStatementContextManager", "path": "django_ledger/io/io_context.py", "snippet": "class CashFlowStatementContextManager:\n CFS_DIGEST_KEY = 'cash_flow_statement'\n\n # todo: implement by period and by unit...\n def __init__(self,\n io_data: dict,\n by_period: bool = False,\n by_unit: bool = False):\n self.IO_DIGEST = io_data\n self.CASH_ACCOUNTS = [a for a in self.IO_DIGEST['accounts'] if a['role'] == roles_module.ASSET_CA_CASH]\n self.JE_MODEL = lazy_loader.get_journal_entry_model()\n\n def check_io_digest(self):\n if GroupContextManager.GROUP_BALANCE_KEY not in self.IO_DIGEST:\n raise ValidationError(\n 'IO Digest must have groups for Cash Flow Statement'\n )\n\n def operating(self):\n group_balances = self.IO_DIGEST[GroupContextManager.GROUP_BALANCE_KEY]\n operating_activities = dict()\n operating_activities['GROUP_CFS_NET_INCOME'] = {\n 'description': 'Net Income',\n 'balance': group_balances['GROUP_CFS_NET_INCOME']\n }\n operating_activities['GROUP_CFS_OP_DEPRECIATION_AMORTIZATION'] = {\n 'description': 'Depreciation & Amortization of Assets',\n 'balance': -group_balances['GROUP_CFS_OP_DEPRECIATION_AMORTIZATION']\n }\n operating_activities['GROUP_CFS_OP_INVESTMENT_GAINS'] = {\n 'description': 'Gain/Loss Sale of Assets',\n 'balance': group_balances['GROUP_CFS_OP_INVESTMENT_GAINS']\n }\n operating_activities['GROUP_CFS_OP_ACCOUNTS_RECEIVABLE'] = {\n 'description': 'Accounts Receivable',\n 'balance': -group_balances['GROUP_CFS_OP_ACCOUNTS_RECEIVABLE']\n }\n operating_activities['GROUP_CFS_OP_INVENTORY'] = {\n 'description': 'Inventories',\n 'balance': -group_balances['GROUP_CFS_OP_INVENTORY']\n }\n\n operating_activities['GROUP_CFS_OP_ACCOUNTS_PAYABLE'] = {\n 'description': 'Accounts Payable',\n 'balance': group_balances['GROUP_CFS_OP_ACCOUNTS_PAYABLE']\n }\n operating_activities['GROUP_CFS_OP_OTHER_CURRENT_ASSETS_ADJUSTMENT'] = {\n 'description': 'Other Current Assets',\n 'balance': -group_balances['GROUP_CFS_OP_OTHER_CURRENT_ASSETS_ADJUSTMENT']\n }\n operating_activities['GROUP_CFS_OP_OTHER_CURRENT_LIABILITIES_ADJUSTMENT'] = {\n 'description': 'Other Current Liabilities',\n 'balance': 
group_balances['GROUP_CFS_OP_OTHER_CURRENT_LIABILITIES_ADJUSTMENT']\n }\n\n net_cash_by_op_activities = sum(i['balance'] for g, i in operating_activities.items())\n self.IO_DIGEST[self.CFS_DIGEST_KEY]['operating'] = operating_activities\n self.IO_DIGEST[self.CFS_DIGEST_KEY]['net_cash_by_activity'] = dict(\n OPERATING=net_cash_by_op_activities\n )\n\n def financing(self):\n group_balances = self.IO_DIGEST[GroupContextManager.GROUP_BALANCE_KEY]\n financing_activities = dict()\n financing_activities['GROUP_CFS_FIN_ISSUING_EQUITY'] = {\n 'description': 'Common Stock, Preferred Stock and Capital Raised',\n 'balance': sum(a['balance'] for a in self.CASH_ACCOUNTS if a['activity'] == self.JE_MODEL.FINANCING_EQUITY)\n }\n financing_activities['GROUP_CFS_FIN_DIVIDENDS'] = {\n 'description': 'Dividends Payed Out to Shareholders',\n 'balance': sum(\n a['balance'] for a in self.CASH_ACCOUNTS if a['activity'] == self.JE_MODEL.FINANCING_DIVIDENDS)\n }\n financing_activities['GROUP_CFS_FIN_ST_DEBT_PAYMENTS'] = {\n 'description': 'Increase/Reduction of Short-Term Debt Principal',\n 'balance': sum(a['balance'] for a in self.CASH_ACCOUNTS if a['activity'] == self.JE_MODEL.FINANCING_STD)\n }\n financing_activities['GROUP_CFS_FIN_LT_DEBT_PAYMENTS'] = {\n 'description': 'Increase/Reduction of Long-Term Debt Principal',\n 'balance': sum(a['balance'] for a in self.CASH_ACCOUNTS if a['activity'] == self.JE_MODEL.FINANCING_LTD)\n }\n\n net_cash = sum(i['balance'] for g, i in financing_activities.items())\n self.IO_DIGEST[self.CFS_DIGEST_KEY]['financing'] = financing_activities\n self.IO_DIGEST[self.CFS_DIGEST_KEY]['net_cash_by_activity']['FINANCING'] = net_cash\n\n def investing(self):\n group_balances = self.IO_DIGEST[GroupContextManager.GROUP_BALANCE_KEY]\n investing_activities = dict()\n investing_activities['GROUP_CFS_INVESTING_SECURITIES'] = {\n 'description': 'Purchase, Maturity and Sales of Investments & Securities',\n 'balance': sum(\n a['balance'] for a in self.CASH_ACCOUNTS if a['activity'] == self.JE_MODEL.INVESTING_SECURITIES)\n }\n investing_activities['GROUP_CFS_INVESTING_PPE'] = {\n 'description': 'Addition and Disposition of Property, Plant & Equipment',\n 'balance': sum(\n a['balance'] for a in self.CASH_ACCOUNTS if a['activity'] == self.JE_MODEL.INVESTING_PPE)\n }\n\n net_cash = sum(i['balance'] for g, i in investing_activities.items())\n self.IO_DIGEST[self.CFS_DIGEST_KEY]['investing'] = investing_activities\n self.IO_DIGEST[self.CFS_DIGEST_KEY]['net_cash_by_activity']['INVESTING'] = net_cash\n\n def net_cash(self):\n self.IO_DIGEST[self.CFS_DIGEST_KEY]['net_cash'] = sum([\n bal for act, bal in self.IO_DIGEST[self.CFS_DIGEST_KEY]['net_cash_by_activity'].items()\n ])\n\n def digest(self):\n self.check_io_digest()\n self.operating()\n self.financing()\n self.investing()\n self.net_cash()\n return self.IO_DIGEST" }, { "identifier": "IODigestContextManager", "path": "django_ledger/io/io_digest.py", "snippet": "class IODigestContextManager:\n\n def __init__(self, io_data: defaultdict):\n self.IO_DATA: defaultdict = io_data\n self.IO_MODEL = self.IO_DATA['io_model']\n self.TXS_QS = self.IO_DATA['txs_qs']\n self.STRFTIME_FORMAT = '%B %d, %Y'\n\n def get_io_data(self) -> defaultdict:\n return self.IO_DATA\n\n def get_strftime_format(self):\n return self.STRFTIME_FORMAT\n\n def get_from_date(self, as_str: bool = False, fmt=None) -> Optional[date]:\n from_date = self.IO_DATA['from_date']\n if from_date:\n if as_str:\n if not fmt:\n fmt = self.get_strftime_format()\n return from_date.strftime(fmt)\n return 
from_date\n\n def get_to_date(self, as_str: bool = False, fmt=None) -> date:\n if as_str:\n if not fmt:\n fmt = self.get_strftime_format()\n return self.IO_DATA['to_date'].strftime(fmt)\n return self.IO_DATA['to_date']\n\n def is_entity_model(self) -> bool:\n return isinstance(\n self.IO_MODEL,\n lazy_loader.get_entity_model()\n )\n\n def is_ledger_model(self) -> bool:\n return isinstance(\n self.IO_MODEL,\n lazy_loader.get_ledger_model()\n )\n\n def is_unit_model(self) -> bool:\n return isinstance(\n self.IO_MODEL,\n lazy_loader.get_unit_model()\n )\n\n def is_by_unit(self) -> bool:\n return self.IO_DATA['by_unit']\n\n def is_by_period(self) -> bool:\n return self.IO_DATA['by_period']\n\n def is_by_activity(self) -> bool:\n return self.IO_DATA['by_activity']\n\n # Balance Sheet Data...\n def has_balance_sheet(self) -> bool:\n return 'balance_sheet' in self.IO_DATA\n\n def get_balance_sheet_data(self, raise_exception: bool = True) -> Dict:\n try:\n return self.IO_DATA['balance_sheet']\n except KeyError:\n if raise_exception:\n raise IODigestValidationError(\n 'IO Digest does not have balance sheet information available.'\n )\n\n # Income Statement Data...\n def has_income_statement(self) -> bool:\n return 'income_statement' in self.IO_DATA\n\n def get_income_statement_data(self, raise_exception: bool = True) -> Dict:\n try:\n return self.IO_DATA['income_statement']\n except KeyError:\n if raise_exception:\n raise IODigestValidationError(\n 'IO Digest does not have income statement information available.'\n )\n\n # Cash Flow Statement Data...\n def has_cash_flow_statement(self):\n return 'cash_flow_statement' in self.IO_DATA\n\n def get_cash_flow_statement_data(self, raise_exception: bool = True) -> Dict:\n try:\n return self.IO_DATA['cash_flow_statement']\n except KeyError:\n if raise_exception:\n raise IODigestValidationError(\n 'IO Digest does not have cash flow statement information available.'\n )\n\n # CLOSING ENTRIES...\n\n def get_closing_entry_data(self):\n io_data = self.get_io_data()\n return io_data['accounts']" }, { "identifier": "FinancialRatioManager", "path": "django_ledger/io/ratios.py", "snippet": "class FinancialRatioManager:\n\n def __init__(self, io_data):\n self.DIGEST = io_data\n self.ACCOUNTS = io_data['accounts']\n self.RATIO_NA = RATIO_NA\n\n self.quick_assets = io_data['group_balance']['GROUP_QUICK_ASSETS']\n self.assets = io_data['group_balance']['GROUP_ASSETS']\n self.current_liabilities = io_data['group_balance']['GROUP_CURRENT_LIABILITIES']\n self.current_assets = io_data['group_balance']['GROUP_CURRENT_ASSETS']\n self.equity = io_data['group_balance']['GROUP_CAPITAL']\n self.liabilities = io_data['group_balance']['GROUP_LIABILITIES']\n self.net_income = io_data['group_balance']['GROUP_EARNINGS']\n self.net_sales = io_data['group_balance']['GROUP_NET_SALES']\n self.net_profit = io_data['group_balance']['GROUP_NET_PROFIT']\n self.gross_profit = io_data['group_balance']['GROUP_GROSS_PROFIT']\n self.RATIOS = dict()\n\n def digest(self):\n self.quick_ratio()\n self.current_ratio()\n self.debt_to_equity()\n self.return_on_equity()\n self.return_on_assets()\n self.net_profit_margin()\n self.gross_profit_margin()\n self.DIGEST['ratios'] = self.RATIOS\n return self.DIGEST\n\n # ------> SOLVENCY RATIOS <------\n def quick_ratio(self, as_percent=False):\n if self.current_liabilities == 0:\n cr = self.RATIO_NA\n else:\n cr = self.quick_assets / self.current_liabilities\n if as_percent:\n cr = cr * 100\n self.RATIOS['quick_ratio'] = cr\n\n def current_ratio(self, 
as_percent=False):\n if self.current_liabilities == 0:\n cr = RATIO_NA\n else:\n cr = self.current_assets / self.current_liabilities\n if as_percent:\n cr = cr * 100\n self.RATIOS['current_ratio'] = cr\n\n # ------> LEVERAGE RATIOS <------\n def debt_to_equity(self, as_percent=False):\n if self.equity == 0:\n cr = RATIO_NA\n else:\n cr = self.liabilities / self.equity\n if as_percent:\n cr = cr * 100\n self.RATIOS['debt_to_equity'] = cr\n\n # ------> PROFITABILITY RATIOS <------\n def return_on_equity(self, as_percent=False):\n if self.equity == 0:\n cr = RATIO_NA\n else:\n cr = self.net_income / self.equity\n if as_percent:\n cr = cr * 100\n self.RATIOS['return_on_equity'] = cr\n\n def return_on_assets(self, as_percent=False):\n if self.assets == 0:\n cr = RATIO_NA\n else:\n cr = self.net_income / self.assets\n if as_percent:\n cr = cr * 100\n self.RATIOS['return_on_assets'] = cr\n\n def net_profit_margin(self, as_percent=False):\n if self.net_sales == 0:\n npm = RATIO_NA\n else:\n npm = self.net_profit / self.net_sales\n if as_percent:\n npm = npm * 100\n self.RATIOS['net_profit_margin'] = npm\n\n def gross_profit_margin(self, as_percent=False):\n if self.gross_profit == 0:\n gpm = RATIO_NA\n else:\n gpm = self.gross_profit / self.net_sales\n if as_percent:\n gpm = gpm * 100\n self.RATIOS['gross_profit_margin'] = gpm" }, { "identifier": "lazy_loader", "path": "django_ledger/models/utils.py", "snippet": "class LazyLoader:\n ENTITY_MODEL = None\n ENTITY_STATE_MODEL = None\n UNIT_MODEL = None\n ACCOUNT_MODEL = None\n BANK_ACCOUNT_MODEL = None\n LEDGER_MODEL = None\n TXS_MODEL = None\n JE_MODEL = None\n ITEM_MODEL = None\n ITEM_TRANSACTION_MODEL = None\n CUSTOMER_MODEL = None\n INVOICE_MODEL = None\n BILL_MODEL = None\n UOM_MODEL = None\n VENDOR_MODEL = None\n TRANSACTION_MODEL = None\n ENTITY_UNIT_MODEL = None\n PURCHASE_ORDER_MODEL = None\n ESTIMATE_MODEL = None\n CLOSING_ENTRY_MODEL = None\n CLOSING_ENTRY_TRANSACTION_MODEL = None\n ENTITY_DATA_GENERATOR = None\n BALANCE_SHEET_REPORT_CLASS = None\n INCOME_STATEMENT_REPORT_CLASS = None\n CASH_FLOW_STATEMENT_REPORT_CLASS = None\n def get_entity_model(self):\n def get_entity_state_model(self):\n def get_bank_account_model(self):\n def get_account_model(self):\n def get_txs_model(self):\n def get_purchase_order_model(self):\n def get_ledger_model(self):\n def get_unit_model(self):\n def get_journal_entry_model(self):\n def get_item_model(self):\n def get_item_transaction_model(self):\n def get_customer_model(self):\n def get_bill_model(self):\n def get_invoice_model(self):\n def get_uom_model(self):\n def get_vendor_model(self):\n def get_transaction_model(self):\n def get_entity_unit_model(self):\n def get_estimate_model(self):\n def get_entity_data_generator(self):\n def get_closing_entry_model(self):\n def get_closing_entry_transaction_model(self):\n def get_balance_sheet_report_class(self):\n def get_income_statement_report_class(self):\n def get_cash_flow_statement_report_class(self):" } ]
from collections import defaultdict, namedtuple from datetime import datetime, date from itertools import groupby from pathlib import Path from random import choice from typing import List, Set, Union, Tuple, Optional, Dict from django.contrib.auth import get_user_model from django.core.exceptions import ValidationError, ObjectDoesNotExist from django.db.models import Sum, QuerySet from django.db.models.functions import TruncMonth from django.http import Http404 from django.utils.dateparse import parse_date, parse_datetime from django.utils.timezone import make_aware, is_naive, localtime from django.utils.translation import gettext_lazy as _ from django_ledger import settings from django_ledger.exceptions import InvalidDateInputError, TransactionNotInBalanceError from django_ledger.io import roles as roles_module from django_ledger.io.io_context import (RoleContextManager, GroupContextManager, ActivityContextManager, BalanceSheetStatementContextManager, IncomeStatementContextManager, CashFlowStatementContextManager) from django_ledger.io.io_digest import IODigestContextManager from django_ledger.io.ratios import FinancialRatioManager from django_ledger.models.utils import lazy_loader
14,209
""" Django Ledger created by Miguel Sanda <[email protected]>. Copyright© EDMA Group Inc licensed under the GPLv3 Agreement. Contributions to this module: * Miguel Sanda <[email protected]> """ UserModel = get_user_model() def diff_tx_data(tx_data: list, raise_exception: bool = True): IS_TX_MODEL = False TransactionModel = lazy_loader.get_txs_model() if isinstance(tx_data[0], TransactionModel): CREDITS = sum(tx.amount for tx in tx_data if tx.tx_type == 'credit') DEBITS = sum(tx.amount for tx in tx_data if tx.tx_type == 'debit') IS_TX_MODEL = True elif isinstance(tx_data[0], dict): CREDITS = sum(tx['amount'] for tx in tx_data if tx['tx_type'] == 'credit') DEBITS = sum(tx['amount'] for tx in tx_data if tx['tx_type'] == 'debit') else: raise ValidationError('Only Dictionary or TransactionModel allowed.') is_valid = (CREDITS == DEBITS) diff = CREDITS - DEBITS if not is_valid and abs(diff) > settings.DJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE: if raise_exception: raise TransactionNotInBalanceError( f'Invalid tx data. Credits and debits must match. Currently cr: {CREDITS}, db {DEBITS}.' f'Max Tolerance {settings.DJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE}' ) return IS_TX_MODEL, is_valid, diff def check_tx_balance(tx_data: list, perform_correction: bool = False) -> bool: if tx_data: IS_TX_MODEL, is_valid, diff = diff_tx_data(tx_data, raise_exception=perform_correction) if not perform_correction and abs(diff): return False if not perform_correction and abs(diff) > settings.DJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE: return False while not is_valid: tx_type_choice = choice(['debit', 'credit']) txs_candidates = list(tx for tx in tx_data if tx['tx_type'] == tx_type_choice) if len(txs_candidates) > 0: tx = choice(list(tx for tx in tx_data if tx['tx_type'] == tx_type_choice)) if any([diff > 0 and tx_type_choice == 'debit', diff < 0 and tx_type_choice == 'credit']): if IS_TX_MODEL: tx.amount += settings.DJANGO_LEDGER_TRANSACTION_CORRECTION else: tx['amount'] += settings.DJANGO_LEDGER_TRANSACTION_CORRECTION elif any([diff < 0 and tx_type_choice == 'debit', diff > 0 and tx_type_choice == 'credit']): if IS_TX_MODEL: tx.amount -= settings.DJANGO_LEDGER_TRANSACTION_CORRECTION else: tx['amount'] += settings.DJANGO_LEDGER_TRANSACTION_CORRECTION IS_TX_MODEL, is_valid, diff = diff_tx_data(tx_data) return True def validate_io_date(dt: Union[str, date, datetime], no_parse_localdate: bool = True) -> Optional[datetime]: if not dt: return if isinstance(dt, date): dt = make_aware( value=datetime.combine( dt, datetime.min.time() )) return dt elif isinstance(dt, datetime): if is_naive(dt): return make_aware(dt) return dt elif isinstance(dt, str): # try to parse a date object from string... fdt = parse_date(dt) if not fdt: # try to parse a datetime object from string... fdt = parse_datetime(dt) if not fdt:
""" Django Ledger created by Miguel Sanda <[email protected]>. Copyright© EDMA Group Inc licensed under the GPLv3 Agreement. Contributions to this module: * Miguel Sanda <[email protected]> """ UserModel = get_user_model() def diff_tx_data(tx_data: list, raise_exception: bool = True): IS_TX_MODEL = False TransactionModel = lazy_loader.get_txs_model() if isinstance(tx_data[0], TransactionModel): CREDITS = sum(tx.amount for tx in tx_data if tx.tx_type == 'credit') DEBITS = sum(tx.amount for tx in tx_data if tx.tx_type == 'debit') IS_TX_MODEL = True elif isinstance(tx_data[0], dict): CREDITS = sum(tx['amount'] for tx in tx_data if tx['tx_type'] == 'credit') DEBITS = sum(tx['amount'] for tx in tx_data if tx['tx_type'] == 'debit') else: raise ValidationError('Only Dictionary or TransactionModel allowed.') is_valid = (CREDITS == DEBITS) diff = CREDITS - DEBITS if not is_valid and abs(diff) > settings.DJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE: if raise_exception: raise TransactionNotInBalanceError( f'Invalid tx data. Credits and debits must match. Currently cr: {CREDITS}, db {DEBITS}.' f'Max Tolerance {settings.DJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE}' ) return IS_TX_MODEL, is_valid, diff def check_tx_balance(tx_data: list, perform_correction: bool = False) -> bool: if tx_data: IS_TX_MODEL, is_valid, diff = diff_tx_data(tx_data, raise_exception=perform_correction) if not perform_correction and abs(diff): return False if not perform_correction and abs(diff) > settings.DJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE: return False while not is_valid: tx_type_choice = choice(['debit', 'credit']) txs_candidates = list(tx for tx in tx_data if tx['tx_type'] == tx_type_choice) if len(txs_candidates) > 0: tx = choice(list(tx for tx in tx_data if tx['tx_type'] == tx_type_choice)) if any([diff > 0 and tx_type_choice == 'debit', diff < 0 and tx_type_choice == 'credit']): if IS_TX_MODEL: tx.amount += settings.DJANGO_LEDGER_TRANSACTION_CORRECTION else: tx['amount'] += settings.DJANGO_LEDGER_TRANSACTION_CORRECTION elif any([diff < 0 and tx_type_choice == 'debit', diff > 0 and tx_type_choice == 'credit']): if IS_TX_MODEL: tx.amount -= settings.DJANGO_LEDGER_TRANSACTION_CORRECTION else: tx['amount'] += settings.DJANGO_LEDGER_TRANSACTION_CORRECTION IS_TX_MODEL, is_valid, diff = diff_tx_data(tx_data) return True def validate_io_date(dt: Union[str, date, datetime], no_parse_localdate: bool = True) -> Optional[datetime]: if not dt: return if isinstance(dt, date): dt = make_aware( value=datetime.combine( dt, datetime.min.time() )) return dt elif isinstance(dt, datetime): if is_naive(dt): return make_aware(dt) return dt elif isinstance(dt, str): # try to parse a date object from string... fdt = parse_date(dt) if not fdt: # try to parse a datetime object from string... fdt = parse_datetime(dt) if not fdt:
raise InvalidDateInputError(
1
2023-10-20 01:07:20+00:00
16k
Glasgow-AI4BioMed/GenKIE
tasks/pretrain_tasks/unify_task.py
[ { "identifier": "OFATask", "path": "tasks/ofa_task.py", "snippet": "class OFATask(FairseqTask):\n def __init__(self, cfg: OFAConfig, src_dict, tgt_dict):\n super().__init__(cfg)\n self.src_dict = src_dict\n self.tgt_dict = tgt_dict\n\n @classmethod\n def setup_task(cls, cfg: DictConfig, **kwargs):\n \"\"\"Setup the task.\"\"\"\n\n # load dictionaries\n src_dict = cls.load_dictionary(\n os.path.join(cfg.bpe_dir, \"dict.txt\")\n )\n tgt_dict = cls.load_dictionary(\n os.path.join(cfg.bpe_dir, \"dict.txt\")\n )\n src_dict.add_symbol(\"<mask>\")\n tgt_dict.add_symbol(\"<mask>\")\n for i in range(cfg.code_dict_size):\n src_dict.add_symbol(\"<code_{}>\".format(i))\n tgt_dict.add_symbol(\"<code_{}>\".format(i))\n # quantization\n for i in range(cfg.num_bins):\n src_dict.add_symbol(\"<bin_{}>\".format(i))\n tgt_dict.add_symbol(\"<bin_{}>\".format(i))\n\n src_dict.add_symbol(\"<dsep>\")\n tgt_dict.add_symbol(\"<dsep>\")\n\n # self.sep_index = self.add_symbol('50257')\n src_dict.add_symbol(\"50257\")\n tgt_dict.add_symbol(\"50257\")\n\n logger.info(\"source dictionary: {} types\".format(len(src_dict)))\n logger.info(\"target dictionary: {} types\".format(len(tgt_dict)))\n return cls(cfg, src_dict, tgt_dict)\n\n def get_batch_iterator(\n self,\n dataset,\n max_tokens=None,\n max_sentences=None,\n max_positions=None,\n ignore_invalid_inputs=False,\n required_batch_size_multiple=1,\n seed=1,\n num_shards=1,\n shard_id=0,\n num_workers=0,\n epoch=1,\n data_buffer_size=0,\n disable_iterator_cache=False,\n ):\n assert isinstance(dataset, FairseqDataset)\n\n # initialize the dataset with the correct starting epoch\n dataset.set_epoch(epoch)\n\n # create mini-batches with given size constraints\n batch_sampler = [\n [j for j in range(i, min(i + max_sentences, len(dataset)))]\n for i in range(0, len(dataset), max_sentences)\n ]\n total_row_count = dataset.dataset.get_total_row_count()\n num_batches = math.ceil(math.ceil(total_row_count / num_shards) / max_sentences)\n if len(batch_sampler) < num_batches:\n batch_sampler.append([])\n\n # return a reusable, sharded iterator\n epoch_iter = iterators.EpochBatchIterator(\n dataset=dataset,\n collate_fn=dataset.collater,\n batch_sampler=batch_sampler,\n seed=seed,\n num_shards=1,\n shard_id=0,\n num_workers=num_workers,\n epoch=epoch,\n buffer_size=data_buffer_size\n )\n\n return epoch_iter\n\n def build_model(self, cfg: FairseqDataclass):\n model = super().build_model(cfg)\n if self.cfg.bpe == 'bert': # self.cfg.bpe=None\n bpe_dict = {\n \"_name\": \"bert\",\n \"bpe_vocab_file\": os.path.join(self.cfg.bpe_dir, \"vocab.txt\"),\n \"bpe_cased\": False\n }\n bpe_dict = DictConfig(bpe_dict)\n self.bpe = self.build_bpe(bpe_dict)\n else:\n bpe_dict = {\n \"_name\": \"gpt2\",\n \"gpt2_encoder_json\": os.path.join(self.cfg.bpe_dir, \"encoder.json\"),\n \"gpt2_vocab_bpe\": os.path.join(self.cfg.bpe_dir, \"vocab.bpe\")\n }\n bpe_dict = DictConfig(bpe_dict)\n self.bpe = self.build_bpe(bpe_dict)\n return model\n\n def build_generator(\n self, models, args, seq_gen_cls=None, extra_gen_cls_kwargs=None, prefix_allowed_tokens_fn=None,\n ):\n \"\"\"\n Build a :class:`~fairseq.SequenceGenerator` instance for this\n task.\n\n Args:\n models (List[~fairseq.models.FairseqModel]): ensemble of models\n args (fairseq.dataclass.configs.GenerationConfig):\n configuration object (dataclass) for generation\n extra_gen_cls_kwargs (Dict[str, Any]): extra options to pass\n through to SequenceGenerator\n prefix_allowed_tokens_fn (Callable[[int, torch.Tensor], List[int]]):\n If provided, this 
function constrains the beam search to\n allowed tokens only at each step. The provided function\n should take 2 arguments: the batch ID (`batch_id: int`)\n and a unidimensional tensor of token ids (`inputs_ids:\n torch.Tensor`). It has to return a `List[int]` with the\n allowed tokens for the next generation step conditioned\n on the previously generated tokens (`inputs_ids`) and\n the batch ID (`batch_id`). This argument is useful for\n constrained generation conditioned on the prefix, as\n described in \"Autoregressive Entity Retrieval\"\n (https://arxiv.org/abs/2010.00904) and\n https://github.com/facebookresearch/GENRE.\n \"\"\"\n if getattr(args, \"score_reference\", False):\n from fairseq.sequence_scorer import SequenceScorer\n\n return SequenceScorer(\n self.target_dictionary,\n compute_alignment=getattr(args, \"print_alignment\", False),\n )\n\n from fairseq.sequence_generator import (\n # SequenceGenerator,\n SequenceGeneratorWithAlignment,\n )\n from models.sequence_generator import SequenceGenerator\n\n # Choose search strategy. Defaults to Beam Search.\n sampling = getattr(args, \"sampling\", False)\n sampling_topk = getattr(args, \"sampling_topk\", -1)\n sampling_topp = getattr(args, \"sampling_topp\", -1.0)\n diverse_beam_groups = getattr(args, \"diverse_beam_groups\", -1)\n diverse_beam_strength = getattr(args, \"diverse_beam_strength\", 0.5)\n match_source_len = getattr(args, \"match_source_len\", False)\n diversity_rate = getattr(args, \"diversity_rate\", -1)\n constrained = getattr(args, \"constraints\", False)\n if prefix_allowed_tokens_fn is None:\n prefix_allowed_tokens_fn = getattr(args, \"prefix_allowed_tokens_fn\", None)\n if (\n sum(\n int(cond)\n for cond in [\n sampling,\n diverse_beam_groups > 0,\n match_source_len,\n diversity_rate > 0,\n ]\n )\n > 1\n ):\n raise ValueError(\"Provided Search parameters are mutually exclusive.\")\n assert sampling_topk < 0 or sampling, \"--sampling-topk requires --sampling\"\n assert sampling_topp < 0 or sampling, \"--sampling-topp requires --sampling\"\n\n if sampling:\n search_strategy = search.Sampling(\n self.target_dictionary, sampling_topk, sampling_topp\n )\n elif diverse_beam_groups > 0:\n search_strategy = search.DiverseBeamSearch(\n self.target_dictionary, diverse_beam_groups, diverse_beam_strength\n )\n elif match_source_len:\n # this is useful for tagging applications where the output\n # length should match the input length, so we hardcode the\n # length constraints for simplicity\n search_strategy = search.LengthConstrainedBeamSearch(\n self.target_dictionary,\n min_len_a=1,\n min_len_b=0,\n max_len_a=1,\n max_len_b=0,\n )\n elif diversity_rate > -1:\n search_strategy = search.DiverseSiblingsSearch(\n self.target_dictionary, diversity_rate\n )\n elif constrained:\n search_strategy = search.LexicallyConstrainedBeamSearch(\n self.target_dictionary, args.constraints\n )\n elif prefix_allowed_tokens_fn:\n search_strategy = search.PrefixConstrainedBeamSearch(\n self.target_dictionary, prefix_allowed_tokens_fn\n )\n else:\n search_strategy = search.BeamSearch(self.target_dictionary)\n\n extra_gen_cls_kwargs = extra_gen_cls_kwargs or {}\n if seq_gen_cls is None:\n if getattr(args, \"print_alignment\", False):\n seq_gen_cls = SequenceGeneratorWithAlignment\n extra_gen_cls_kwargs[\"print_alignment\"] = args.print_alignment\n else:\n seq_gen_cls = SequenceGenerator\n\n return seq_gen_cls(\n models,\n self.target_dictionary,\n beam_size=getattr(args, \"beam\", 5),\n max_len_a=getattr(args, \"max_len_a\", 0),\n 
max_len_b=getattr(args, \"max_len_b\", 200),\n min_len=getattr(args, \"min_len\", 1),\n normalize_scores=(not getattr(args, \"unnormalized\", False)),\n len_penalty=getattr(args, \"lenpen\", 1),\n unk_penalty=getattr(args, \"unkpen\", 0),\n temperature=getattr(args, \"temperature\", 1.0),\n match_source_len=getattr(args, \"match_source_len\", False),\n no_repeat_ngram_size=getattr(args, \"no_repeat_ngram_size\", 0),\n search_strategy=search_strategy,\n constraint_range=self.cfg.constraint_range,\n **extra_gen_cls_kwargs,\n )\n\n def train_step(\n self, sample, model, criterion, optimizer, update_num, ignore_grad=False, **extra_kwargs\n ):\n \"\"\"\n Do forward and backward, and return the loss as computed by *criterion*\n for the given *model* and *sample*.\n\n Args:\n sample (dict): the mini-batch. The format is defined by the\n :class:`~fairseq.data.FairseqDataset`.\n model (~fairseq.models.BaseFairseqModel): the model\n criterion (~fairseq.criterions.FairseqCriterion): the criterion\n optimizer (~fairseq.optim.FairseqOptimizer): the optimizer\n update_num (int): the current update\n ignore_grad (bool): multiply loss by 0 if this is set to True\n\n Returns:\n tuple:\n - the loss\n - the sample size, which is used as the denominator for the\n gradient\n - logging outputs to display while training\n \"\"\"\n model.train()\n model.set_num_updates(update_num)\n with torch.autograd.profiler.record_function(\"forward\"):\n with torch.cuda.amp.autocast(enabled=(isinstance(optimizer, AMPOptimizer))):\n loss, sample_size, logging_output = criterion(model, sample, update_num=update_num)\n if ignore_grad:\n loss *= 0\n with torch.autograd.profiler.record_function(\"backward\"):\n optimizer.backward(loss)\n return loss, sample_size, logging_output\n\n def max_positions(self):\n \"\"\"Return the max sentence length allowed by the task.\"\"\"\n return (self.cfg.max_source_positions, self.cfg.max_target_positions)\n\n @property\n def source_dictionary(self):\n \"\"\"Return the source :class:`~fairseq.data.Dictionary`.\"\"\"\n return self.src_dict\n\n @property\n def target_dictionary(self):\n \"\"\"Return the target :class:`~fairseq.data.Dictionary`.\"\"\"\n return self.tgt_dict" }, { "identifier": "OFAConfig", "path": "tasks/ofa_task.py", "snippet": "class OFAConfig(FairseqDataclass):\n data: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"comma separated path to data list, will be iterated upon during epochs \"\n \"in round-robin manner; valid data are always in the last\"\n },\n )\n selected_cols: Optional[str] = field(\n default=None,\n metadata={\"help\": \"selected cols\"},\n )\n bpe: Optional[str] = field(\n default='gpt2',\n metadata={\"help\": \"which bpe to use\"},\n )\n bpe_dir: Optional[str] = field(\n default=None,\n metadata={\"help\": \"bpe dir\"},\n )\n max_source_positions: int = field(\n default=1024, metadata={\"help\": \"max number of tokens in the source sequence\"}\n )\n max_target_positions: int = field(\n default=1024, metadata={\"help\": \"max number of tokens in the target sequence\"}\n )\n max_src_length: int = field(\n default=128, metadata={\"help\": \"the maximum src sequence length\"}\n )\n max_tgt_length: int = field(\n default=30, metadata={\"help\": \"the maximum target sequence length\"}\n )\n\n code_dict_size: int = field(\n default=8192, metadata={\"help\": \"code dict size\"}\n )\n patch_image_size: int = field(\n default=480, metadata={\"help\": \"patch image size\"}\n )\n orig_patch_image_size: int = field(\n default=256, metadata={\"help\": 
\"patch image size\"}\n )\n num_bins: int = field(\n default=1000, metadata={\"help\": \"number of quantization bins\"}\n )\n\n imagenet_default_mean_and_std: bool = field(\n default=False,\n metadata={\"help\": \"imagenet normalize\"},\n )\n constraint_range: Optional[str] = field(\n default=None,\n metadata={\"help\": \"constraint range\"}\n )" }, { "identifier": "UnifyDataset", "path": "data/pretrain_data/unify_dataset.py", "snippet": "class UnifyDataset(OFADataset):\n def __init__(\n self,\n split,\n dataset,\n bpe,\n src_dict,\n tgt_dict=None,\n max_src_length=128,\n max_tgt_length=30,\n seed=7,\n code_dict_size=8192,\n num_bins=1000,\n patch_image_size=384,\n code_image_size=128,\n pure_text_dataset=None,\n pure_image_dataset=None,\n detection_dataset=None,\n all_object_list=None,\n all_caption_list=None,\n type2ans_dict=None,\n ans2type_dict=None,\n max_image_size=512,\n mask_ratio=0.3,\n random_ratio=0.0,\n keep_ratio=0.0,\n mask_length=\"span-poisson\",\n poisson_lambda=3.0,\n replace_length=1\n ):\n super().__init__(split, dataset, bpe, src_dict, tgt_dict)\n self.max_src_length = max_src_length\n self.max_tgt_length = max_tgt_length\n self.seed = seed\n self.code_dict_size = code_dict_size\n self.num_bins = num_bins\n self.patch_image_size = patch_image_size\n self.code_image_size = code_image_size\n\n self.pure_text_dataset = pure_text_dataset\n self.pure_image_dataset = pure_image_dataset\n self.detection_dataset = detection_dataset\n self.epoch = 0\n\n self.all_object_list = all_object_list\n self.all_caption_list = all_caption_list\n self.type2ans_dict = type2ans_dict\n self.ans2type_dict = ans2type_dict\n\n self.mask_ratio = mask_ratio\n self.random_ratio = random_ratio\n self.keep_ratio = keep_ratio\n self.mask_length = mask_length\n self.poisson_lambda = poisson_lambda\n self.replace_length = replace_length\n if self.replace_length not in [-1, 0, 1]:\n raise ValueError(f\"invalid arg: replace_length={self.replace_length}\")\n if self.mask_length not in [\"subword\", \"word\", \"span-poisson\"]:\n raise ValueError(f\"invalid arg: mask-length={self.mask_length}\")\n if self.mask_length == \"subword\" and self.replace_length not in [0, 1]:\n raise ValueError(f\"if using subwords, use replace-length=1 or 0\")\n\n self.mask_idx = src_dict.index(\"<mask>\")\n self.mask_whole_word = (\n get_whole_word_mask(self.bpe, self.src_dict)\n if self.mask_length != \"subword\"\n else None\n )\n self.mask_span_distribution = None\n if self.mask_length == \"span-poisson\":\n _lambda = self.poisson_lambda\n lambda_to_the_k = 1\n e_to_the_minus_lambda = math.exp(-_lambda)\n k_factorial = 1\n ps = []\n for k in range(0, 128):\n ps.append(e_to_the_minus_lambda * lambda_to_the_k / k_factorial)\n lambda_to_the_k *= _lambda\n k_factorial *= k + 1\n if ps[-1] < 0.0000001:\n break\n ps = torch.FloatTensor(ps)\n self.mask_span_distribution = torch.distributions.Categorical(ps)\n\n self.pos_tgt_item = self.encode_text(\" yes\")\n self.neg_tgt_item = self.encode_text(\" no\")\n\n self.mask_left = self.mask_top = int(0.5 * self.code_image_size)\n self.mask_right = self.mask_bottom = int(1.5 * self.code_image_size)\n self.mask_ids = [\n i*self.code_image_size*2+j\n for i in range(self.code_image_size*2) for j in range(self.code_image_size*2)\n if not (self.mask_left <= i < self.mask_right and self.mask_top <= j < self.mask_bottom)\n ]\n\n scales = np.arange(patch_image_size, 481).tolist()\n\n # for image-text pair\n self.patch_resize_transform = transforms.Compose([\n T.RandomResize(scales, 
max_size=672),\n transforms.CenterCrop(patch_image_size),\n RandomAugment(2, 7, isPIL=True, augs=['Identity', 'AutoContrast', 'Equalize', 'Brightness', 'Sharpness',\n 'ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'Rotate']),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),\n ])\n # for pure image\n self.patch_crop_transform = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),\n ])\n # for detection\n self.detection_transform = T.Compose([\n T.RandomHorizontalFlip(),\n T.LargeScaleJitter(output_size=self.code_image_size*2, aug_scale_min=1.0, aug_scale_max=1.5),\n T.ToTensor(),\n T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], max_image_size=max_image_size)\n ])\n # for visual grounding\n self.visual_grounding_transform = T.Compose([\n T.RandomResize(scales, max_size=672),\n T.ObjectCenterCrop((patch_image_size, patch_image_size)),\n T.ToTensor(),\n T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], max_image_size=max_image_size)\n ])\n\n def set_epoch(self, epoch, **unused):\n self.epoch = epoch\n\n def get_negative_caption(self, caption, gt_objects):\n prob = random.random()\n if gt_objects is not None and gt_objects != '' and prob > 0.6:\n gt_object = random.choice(gt_objects.strip().split('&&'))\n negative_object = random.choice(self.all_object_list[:-1])\n negative_object = self.all_object_list[-1] if negative_object == gt_object else negative_object\n negative_caption = caption.replace(gt_object, negative_object)\n else:\n negative_caption = random.choice(self.all_caption_list)\n return negative_caption\n\n def get_negative_answer(self, answer, conf):\n prob = random.random()\n if conf > (prob + 0.1) and answer in self.ans2type_dict:\n negative_answer_type = self.ans2type_dict[answer]\n if negative_answer_type == 'how many' and answer.isdigit() and prob > 0.5:\n negative_answer = int(answer) + random.choice([-1, 1]) if answer != 0 else 1\n else:\n negative_answer_list = self.type2ans_dict[negative_answer_type]\n negative_answer = random.choice(negative_answer_list[:-1])\n negative_answer = negative_answer_list[-1] if negative_answer == answer else negative_answer\n return negative_answer\n\n negative_answer_list = self.type2ans_dict['other']\n negative_answer = random.choice(negative_answer_list[:-1])\n negative_answer = negative_answer_list[-1] if negative_answer == answer else negative_answer\n return negative_answer\n\n def process_image_text_pair(self, index):\n uniq_id, image, caption, question, refs, gt_objects, dataset_name, type = self.dataset[index]\n\n image = Image.open(BytesIO(base64.urlsafe_b64decode(image))).convert(\"RGB\")\n patch_image = self.patch_resize_transform(image) if type != 'visual_grounding' else None\n patch_mask = torch.tensor([True])\n conf = torch.tensor([1.0])\n if type == 'caption':\n tgt_caption = self.pre_caption(caption, self.max_tgt_length)\n pos_src_caption = self.pre_caption(caption, self.max_src_length)\n neg_src_caption = self.pre_caption(self.get_negative_caption(caption, gt_objects), self.max_src_length)\n src_item = self.encode_text(\" what does the image describe?\")\n tgt_item = self.encode_text(\" {}\".format(tgt_caption))\n pos_src_item = self.encode_text(' does the image describe \" {} \"?'.format(pos_src_caption))\n neg_src_item = self.encode_text(' does the image describe \" {} \"?'.format(neg_src_caption))\n elif type == 'qa':\n question = self.pre_question(question, self.max_src_length)\n ref_dict = 
{item.split('|!+')[1]: float(item.split('|!+')[0]) for item in refs.split('&&')}\n answer = max(ref_dict, key=ref_dict.get)\n conf = ref_dict[answer]\n src_item = self.encode_text(\" {}\".format(question))\n tgt_item = self.encode_text(\" {}\".format(answer))\n conf = torch.tensor([conf])\n pos_src_item = self.encode_text(' what is the answer to question \" {} \". is \" {} \"?'.format(question, answer))\n neg_src_item = self.encode_text(\n ' what is the answer to question \" {} \". is \" {} \"?'.format(question, self.get_negative_answer(answer, conf))\n )\n elif type == 'visual_grounding':\n conf = torch.tensor([1.0])\n w, h = image.size\n boxes_target = {\"boxes\": [], \"labels\": [], \"area\": [], \"size\": torch.tensor([h, w])}\n x0, y0, x1, y1 = refs.strip().split(',')\n boxes_target[\"boxes\"] = torch.tensor([[float(x0), float(y0), float(x1), float(y1)]])\n boxes_target[\"labels\"] = np.array([0])\n boxes_target[\"area\"] = torch.tensor([(float(x1) - float(x0)) * (float(y1) - float(y0))])\n patch_image, boxes_target = self.visual_grounding_transform(image, boxes_target)\n quant_x0 = \"<bin_{}>\".format(int((boxes_target[\"boxes\"][0][0] * (self.num_bins - 1)).round()))\n quant_y0 = \"<bin_{}>\".format(int((boxes_target[\"boxes\"][0][1] * (self.num_bins - 1)).round()))\n quant_x1 = \"<bin_{}>\".format(int((boxes_target[\"boxes\"][0][2] * (self.num_bins - 1)).round()))\n quant_y1 = \"<bin_{}>\".format(int((boxes_target[\"boxes\"][0][3] * (self.num_bins - 1)).round()))\n region_coord = \"{} {} {} {}\".format(quant_x0, quant_y0, quant_x1, quant_y1)\n src_caption = self.pre_caption(caption, self.max_src_length)\n src_item = self.encode_text(' which region does the text \" {} \" describe?'.format(src_caption))\n tgt_item = self.encode_text(region_coord, use_bpe=False)\n else:\n logger.info('type {} is not implemented'.format(type))\n raise NotImplementedError\n\n src_item = torch.cat([self.bos_item, src_item, self.eos_item])\n target_item = torch.cat([tgt_item, self.eos_item])\n prev_output_item = torch.cat([self.bos_item, tgt_item])\n pos_src_item = torch.cat([self.bos_item, pos_src_item, self.eos_item]) if type != 'visual_grounding' else None\n neg_src_item = torch.cat([self.bos_item, neg_src_item, self.eos_item]) if type != 'visual_grounding' else None\n\n if type == 'caption' and dataset_name == 'cc12m':\n target_item[:2] = self.src_dict.pad()\n target_item[-1] = self.eos_item\n\n example = {\n \"id\": uniq_id,\n \"source\": src_item,\n \"patch_image\": patch_image,\n \"patch_mask\": patch_mask,\n \"target\": target_item,\n \"prev_output_tokens\": prev_output_item,\n \"conf\": conf,\n }\n\n examples = [example]\n prob = random.random()\n if type == 'visual_grounding':\n region_example = example.copy()\n region_prefix_item = self.encode_text(' what does the region describe? 
region:')\n region_coord_item = self.encode_text('{}'.format(region_coord), use_bpe=False)\n region_src_item = torch.cat([region_prefix_item, region_coord_item])\n region_tgt_item = self.encode_text(' {}'.format(self.pre_caption(caption, self.max_tgt_length)))\n region_example[\"source\"] = torch.cat([self.bos_item, region_src_item, self.eos_item])\n region_example[\"target\"] = torch.cat([region_tgt_item, self.eos_item])\n region_example[\"prev_output_tokens\"] = torch.cat([self.bos_item, region_tgt_item])\n region_example[\"conf\"] = torch.tensor([1.0])\n examples.append(region_example)\n elif prob >= 0.5 and self.split == 'train':\n pos_example = example.copy()\n pos_example[\"source\"] = pos_src_item\n pos_example[\"target\"] = torch.cat([self.pos_tgt_item, self.eos_item])\n pos_example[\"prev_output_tokens\"] = torch.cat([self.bos_item, self.pos_tgt_item])\n examples.append(pos_example)\n elif self.split == 'train':\n neg_example = example.copy()\n neg_example[\"source\"] = neg_src_item\n neg_example[\"target\"] = torch.cat([self.neg_tgt_item, self.eos_item])\n neg_example[\"prev_output_tokens\"] = torch.cat([self.bos_item, self.neg_tgt_item])\n examples.append(neg_example)\n return examples\n\n def process_pure_text(self, index):\n patch_image = torch.zeros((3, self.code_image_size*2, self.code_image_size*2))\n patch_mask = torch.tensor([False])\n code_mask = torch.tensor([False])\n conf = torch.tensor([2.0])\n\n examples = []\n for _ in range(2):\n uniq_id, text = self.pure_text_dataset[index]\n text = text.strip().lower()\n text_item = self.encode_text(\" {}\".format(text), length=512)\n text_item = text_item[-256:]\n text_item = torch.cat([self.bos_item, text_item, self.eos_item])\n mask_text_item = self.add_whole_word_mask(text_item.clone(), self.mask_ratio)\n prefix_item = self.encode_text(' what is the complete text of \" \"?')\n src_item = torch.cat([prefix_item[:-2], mask_text_item[1:-1], prefix_item[-2:]])\n tgt_item = text_item[1:-1]\n src_item = torch.cat([self.bos_item, src_item, self.eos_item])\n target_item = torch.cat([tgt_item, self.eos_item])\n prev_output_item = torch.cat([self.bos_item, tgt_item])\n example = {\n \"id\": uniq_id,\n \"source\": src_item,\n \"patch_image\": patch_image,\n \"patch_mask\": patch_mask,\n \"code_mask\": code_mask,\n \"target\": target_item,\n \"prev_output_tokens\": prev_output_item,\n \"conf\": conf,\n }\n examples.append(example)\n\n return examples\n\n def process_pure_image(self, index):\n image_id, image, code = self.pure_image_dataset[index]\n image = Image.open(BytesIO(base64.urlsafe_b64decode(image))).convert(\"RGB\")\n patch_image = self.patch_crop_transform(image)\n patch_image[:, self.mask_top:self.mask_bottom, self.mask_left:self.mask_right] = 0\n patch_mask = torch.tensor([True])\n src_item = self.encode_text(\" what is the image in the middle part?\")\n image_code = torch.LongTensor([int(num) for num in code.strip().split()])\n tgt_item = image_code + len(self.src_dict) - self.code_dict_size - self.num_bins\n code_mask = torch.tensor([True])\n conf = torch.tensor([2.0])\n\n src_item = torch.cat([self.bos_item, src_item, self.eos_item])\n target_item = torch.cat([tgt_item, self.eos_item])\n prev_output_item = torch.cat([self.bos_item, tgt_item])\n\n example = {\n \"id\": image_id,\n \"source\": src_item,\n \"patch_image\": patch_image,\n \"patch_mask\": patch_mask,\n \"code_mask\": code_mask,\n \"target\": target_item,\n \"prev_output_tokens\": prev_output_item,\n \"conf\": conf,\n }\n return [example]\n\n def 
process_detection(self, index):\n image_id, image, label = self.detection_dataset[index]\n image = Image.open(BytesIO(base64.urlsafe_b64decode(image))).convert(\"RGB\")\n\n w, h = image.size\n boxes_target = {\"boxes\": [], \"labels\": [], \"area\": [], \"size\": torch.tensor([h, w])}\n label_list = label.strip().split('&&')\n for label in label_list:\n x0, y0, x1, y1, cat_id, cat = label.strip().split(',', 5)\n boxes_target[\"boxes\"].append([float(x0), float(y0), float(x1), float(y1)])\n boxes_target[\"labels\"].append(cat)\n boxes_target[\"area\"].append((float(x1) - float(x0)) * (float(y1) - float(y0)))\n boxes_target[\"boxes\"] = torch.tensor(boxes_target[\"boxes\"])\n boxes_target[\"labels\"] = np.array(boxes_target[\"labels\"])\n boxes_target[\"area\"] = torch.tensor(boxes_target[\"area\"])\n\n patch_image, boxes_target = self.detection_transform(image, boxes_target)\n patch_mask = torch.tensor([True])\n code_mask = torch.tensor([False])\n conf = torch.tensor([2.0])\n\n quant_boxes = []\n for i, box in enumerate(boxes_target[\"boxes\"]):\n quant_boxes.extend([\"<bin_{}>\".format(int((pos * (self.num_bins - 1)).round())) for pos in box[:4]])\n quant_boxes.append(self.bpe.encode(' {}'.format(boxes_target[\"labels\"][i])))\n src_item = self.encode_text(' what are the objects in the image?')\n tgt_item = self.encode_text(' '.join(quant_boxes), use_bpe=False)\n\n src_item = torch.cat([self.bos_item, src_item, self.eos_item])\n target_item = torch.cat([tgt_item, self.eos_item])\n prev_output_item = torch.cat([self.bos_item, tgt_item])\n\n example = {\n \"id\": image_id,\n \"source\": src_item,\n \"patch_image\": patch_image,\n \"patch_mask\": patch_mask,\n \"code_mask\": code_mask,\n \"target\": target_item,\n \"prev_output_tokens\": prev_output_item,\n \"conf\": conf,\n }\n return [example]\n\n def __getitem__(self, index):\n with data_utils.numpy_seed(self.seed, self.epoch):\n pair_samples = self.process_image_text_pair(index)\n extra_samples = []\n if self.split == 'train' and self.dataset.data_cnt % 8 == 0:\n extra_samples += self.process_pure_text(0) if self.pure_text_dataset else []\n extra_samples += self.process_pure_image(0) if self.pure_image_dataset else []\n extra_samples += self.process_detection(0) if self.detection_dataset else []\n return pair_samples, extra_samples\n\n def word_starts(self, source):\n if self.mask_whole_word is not None:\n is_word_start = self.mask_whole_word.gather(0, source)\n else:\n is_word_start = torch.ones(source.size())\n is_word_start[0] = 0\n is_word_start[-1] = 0\n return is_word_start\n\n def add_whole_word_mask(self, source, p):\n is_word_start = self.word_starts(source)\n num_to_mask = int(math.ceil(is_word_start.float().sum() * p))\n num_inserts = 0\n if num_to_mask == 0:\n return source\n\n if self.mask_span_distribution is not None:\n lengths = self.mask_span_distribution.sample(sample_shape=(num_to_mask,))\n\n # Make sure we have enough to mask\n cum_length = torch.cumsum(lengths, 0)\n while cum_length[-1] < num_to_mask:\n lengths = torch.cat(\n [\n lengths,\n self.mask_span_distribution.sample(sample_shape=(num_to_mask,)),\n ],\n dim=0,\n )\n cum_length = torch.cumsum(lengths, 0)\n\n # Trim to masking budget\n i = 0\n while cum_length[i] < num_to_mask:\n i += 1\n lengths[i] = num_to_mask - (0 if i == 0 else cum_length[i - 1])\n num_to_mask = i + 1\n lengths = lengths[:num_to_mask]\n\n # Handle 0-length mask (inserts) separately\n lengths = lengths[lengths > 0]\n num_inserts = num_to_mask - lengths.size(0)\n num_to_mask -= num_inserts\n 
if num_to_mask == 0:\n return self.add_insertion_noise(source, num_inserts / source.size(0))\n\n assert (lengths > 0).all()\n else:\n lengths = torch.ones((num_to_mask,)).long()\n assert is_word_start[-1] == 0\n word_starts = is_word_start.nonzero(as_tuple=False)\n indices = word_starts[\n torch.randperm(word_starts.size(0))[:num_to_mask]\n ].squeeze(1)\n mask_random = torch.FloatTensor(num_to_mask).uniform_() < self.random_ratio\n\n source_length = source.size(0)\n assert source_length - 1 not in indices\n to_keep = torch.ones(source_length, dtype=torch.bool)\n is_word_start[\n -1\n ] = 255 # acts as a long length, so spans don't go over the end of doc\n if self.replace_length == 0:\n to_keep[indices] = 0\n else:\n # keep index, but replace it with [MASK]\n source[indices] = self.mask_idx\n source[indices[mask_random]] = torch.randint(\n 4, len(self.tgt_dict) - self.code_dict_size - self.num_bins, size=(mask_random.sum(),)\n )\n\n if self.mask_span_distribution is not None:\n assert len(lengths.size()) == 1\n assert lengths.size() == indices.size()\n lengths -= 1\n while indices.size(0) > 0:\n assert lengths.size() == indices.size()\n lengths -= is_word_start[indices + 1].long()\n uncompleted = lengths >= 0\n indices = indices[uncompleted] + 1\n mask_random = mask_random[uncompleted]\n lengths = lengths[uncompleted]\n if self.replace_length != -1:\n # delete token\n to_keep[indices] = 0\n else:\n # keep index, but replace it with [MASK]\n source[indices] = self.mask_idx\n source[indices[mask_random]] = torch.randint(\n 4, len(self.tgt_dict) - self.code_dict_size - self.num_bins, size=(mask_random.sum(),)\n )\n else:\n # A bit faster when all lengths are 1\n while indices.size(0) > 0:\n uncompleted = is_word_start[indices + 1] == 0\n indices = indices[uncompleted] + 1\n mask_random = mask_random[uncompleted]\n if self.replace_length != -1:\n # delete token\n to_keep[indices] = 0\n else:\n # keep index, but replace it with [MASK]\n source[indices] = self.mask_idx\n source[indices[mask_random]] = torch.randint(\n 4, len(self.tgt_dict) - self.code_dict_size - self.num_bins, size=(mask_random.sum(),)\n )\n\n assert source_length - 1 not in indices\n\n source = source[to_keep]\n\n if num_inserts > 0:\n source = self.add_insertion_noise(source, num_inserts / source.size(0))\n\n return source\n\n def add_insertion_noise(self, tokens, p):\n if p == 0.0:\n return tokens\n\n num_tokens = len(tokens)\n n = int(math.ceil(num_tokens * p))\n\n noise_indices = torch.randperm(num_tokens + n - 2)[:n] + 1\n noise_mask = torch.zeros(size=(num_tokens + n,), dtype=torch.bool)\n noise_mask[noise_indices] = 1\n result = torch.LongTensor(n + len(tokens)).fill_(-1)\n\n num_random = int(math.ceil(n * self.random_ratio))\n result[noise_indices[num_random:]] = self.mask_idx\n result[noise_indices[:num_random]] = torch.randint(\n low=4, high=len(self.tgt_dict)-self.code_dict_size-self.num_bins, size=(num_random,)\n )\n\n result[~noise_mask] = tokens\n\n assert (result >= 0).all()\n return result\n\n def collater(self, samples, pad_to_length=None):\n \"\"\"Merge samples of different tasks to form two mini-batches.\n Args:\n samples (List[Tuple]): samples to collate\n Returns:\n Tuple[dict]: two mini-batch containing the data of different tasks\n \"\"\"\n\n samples_v1 = [] # containing image-text pairs\n samples_v2 = [] # containing detection data, text data and image data\n for sample_tuple in samples:\n samples_v1 += sample_tuple[0]\n samples_v2 += sample_tuple[1]\n if samples_v2 != []:\n res_v1 = collate(samples_v1, 
pad_idx=self.src_dict.pad(), eos_idx=self.eos)\n res_v2 = collate(samples_v2, pad_idx=self.src_dict.pad(), eos_idx=self.eos)\n return res_v1, res_v2\n else:\n res_v1 = collate(samples_v1, pad_idx=self.src_dict.pad(), eos_idx=self.eos)\n return res_v1" }, { "identifier": "FileDataset", "path": "data/file_dataset.py", "snippet": "class FileDataset:\n def __init__(self, file_path, selected_col_ids=None, dtypes=None, separator=\"\\t\", cached_index=False):\n self.file_path = file_path\n assert os.path.exists(self.file_path), \"Error: The local datafile {} not exists!\".format(self.file_path)\n\n self.separator = separator\n if selected_col_ids is None:\n # default to all fields\n self.selected_col_ids = list(\n range(len(open(self.file_path).readline().rstrip(\"\\n\").split(self.separator))))\n else:\n self.selected_col_ids = [int(col_id) for col_id in selected_col_ids.split(\",\")]\n if dtypes is None:\n # default to str\n self.dtypes = [str for col_id in self.selected_col_ids]\n else:\n self.dtypes = [eval(col_dtype) for col_dtype in dtypes.split(\",\")]\n assert len(self.dtypes) == len(self.selected_col_ids)\n\n self.data_cnt = 0\n try:\n self.slice_id = torch.distributed.get_rank()\n self.slice_count = torch.distributed.get_world_size()\n except Exception:\n self.slice_id = 0\n self.slice_count = 1\n self.cached_index = cached_index\n self._init_seek_index()\n self._reader = self._get_reader()\n print(\"file {} slice_id {} row count {} total row count {}\".format(\n self.file_path, self.slice_id, self.row_count, self.total_row_count)\n )\n\n def _init_seek_index(self):\n if self.cached_index:\n cache_path = \"{}.index\".format(self.file_path)\n assert os.path.exists(cache_path), \"cache file {} not exists!\".format(cache_path)\n self.total_row_count, self.lineid_to_offset = pickle.load(open(cache_path, \"rb\"))\n print(\"local datafile {} slice_id {} use cached row_count and line_idx-to-offset mapping\".format(\n self.file_path, self.slice_id))\n else:\n # make an iteration over the file to get row_count and line_idx-to-offset mapping\n fp = open(self.file_path, \"r\")\n print(\"local datafile {} slice_id {} begin to initialize row_count and line_idx-to-offset mapping\".format(\n self.file_path, self.slice_id))\n self.total_row_count = 0\n offset = 0\n self.lineid_to_offset = []\n for line in fp:\n self.lineid_to_offset.append(offset)\n self.total_row_count += 1\n offset += len(line.encode('utf-8'))\n self._compute_start_pos_and_row_count()\n print(\"local datafile {} slice_id {} finished initializing row_count and line_idx-to-offset mapping\".format(\n self.file_path, self.slice_id))\n\n def _compute_start_pos_and_row_count(self):\n self.row_count = self.total_row_count // self.slice_count\n if self.slice_id < self.total_row_count - self.row_count * self.slice_count:\n self.row_count += 1\n self.start_pos = self.row_count * self.slice_id\n else:\n self.start_pos = self.row_count * self.slice_id + (self.total_row_count - self.row_count * self.slice_count)\n\n def _get_reader(self):\n fp = open(self.file_path, \"r\")\n fp.seek(self.lineid_to_offset[self.start_pos])\n return fp\n\n def _seek(self, offset=0):\n try:\n print(\"slice_id {} seek offset {}\".format(self.slice_id, self.start_pos + offset))\n self._reader.seek(self.lineid_to_offset[self.start_pos + offset])\n self.data_cnt = offset\n except Exception:\n print(\"slice_id {} seek offset {}\".format(self.slice_id, offset))\n self._reader.seek(self.lineid_to_offset[offset])\n self.data_cnt = offset\n\n def __del__(self):\n 
self._reader.close()\n\n def __len__(self):\n return self.row_count\n\n def get_total_row_count(self):\n return self.total_row_count\n\n def __getitem__(self, index):\n if self.data_cnt == self.row_count:\n print(\"reach the end of datafile, start a new reader\")\n self.data_cnt = 0\n self._reader = self._get_reader()\n column_l = self._reader.readline().rstrip(\"\\n\").split(self.separator)\n self.data_cnt += 1\n try:\n column_l = [dtype(column_l[col_id]) for col_id, dtype in zip(self.selected_col_ids, self.dtypes)]\n except IndexError:\n print('Stop')\n return column_l" } ]
from dataclasses import dataclass, field
from typing import Optional
from fairseq.tasks import register_task
from fairseq.data import FairseqDataset, iterators
from tasks.ofa_task import OFATask, OFAConfig
from data.pretrain_data.unify_dataset import UnifyDataset
from data.file_dataset import FileDataset
import json
import logging
import os
import math
11,373
# Copyright 2022 The OFA-Sys Team.
# All rights reserved.
# This source code is licensed under the Apache 2.0 license
# found in the LICENSE file in the root directory.

logger = logging.getLogger(__name__)


@dataclass
class UnifyConfig(OFAConfig):
    max_image_size: int = field(
        default=512, metadata={"help": ""}
    )
    text_data: Optional[str] = field(
        default=None,
        metadata={"help": "pure text data"},
    )
    image_data: Optional[str] = field(
        default=None,
        metadata={"help": "pure image data"},
    )
    detection_data: Optional[str] = field(
        default=None,
        metadata={"help": "detection data"},
    )
    text_selected_cols: Optional[str] = field(
        default=None,
        metadata={"help": "pure text data selected cols"},
    )
    image_selected_cols: Optional[str] = field(
        default=None,
        metadata={"help": "pure image data selected cols"},
    )
    detection_selected_cols: Optional[str] = field(
        default=None,
        metadata={"help": "detection data selected cols"},
    )
    neg_sample_dir: Optional[str] = field(
        default=None,
        metadata={"help": "negative sample directory, which contains captions (taken from all image-text pairs), "
                          "answers (taken from VQA), "
                          "objects (taken form OpenImages) "},
    )
    code_image_size: int = field(
        default=128, metadata={"help": "the resolution of the generated image in the image infilling task"}
    )
    pretrain_seed: int = field(
        default=7,
        metadata={"help": "pretrain seed"},
    )
    mask_ratio: float = field(
        default=0.3,
        metadata={"help": "fraction of words/subwords that will be masked"},
    )
    random_ratio: float = field(
        default=0.0,
        metadata={"help": "instead of using [MASK], use random token this often"},
    )
    keep_ratio: float = field(
        default=0.0,
        metadata={"help": "instead of using [MASK], keep original token this often"},
    )
    mask_length: str = field(
        default="span-poisson",
        metadata={"help": "mask length to choose ['subword', 'word', 'span-poisson']"},
    )
    poisson_lambda: float = field(
        default=3.0,
        metadata={"help": "randomly shuffle sentences for this proportion of inputs"},
    )
    replace_length: int = field(
        default=1,
        metadata={"help": "when masking N tokens, replace with 0, 1, or N tokens (use -1 for N)"},
    )


@register_task("unify_task", dataclass=UnifyConfig)
class UnifyTask(OFATask):
    def __init__(self, cfg: UnifyConfig, src_dict, tgt_dict):
        super().__init__(cfg, src_dict, tgt_dict)

        self.type2ans_dict = json.load(open(os.path.join(self.cfg.neg_sample_dir, 'type2ans.json')))
        self.ans2type_dict = {}
        for type, answer_list in self.type2ans_dict.items():
            if type == 'other':
                continue
            for answer in answer_list:
                self.ans2type_dict[answer] = type

        self.all_object_list = [
            row.strip() for row in open(os.path.join(self.cfg.neg_sample_dir, 'object.txt')) if row.strip() != ''
        ]
        self.all_caption_list = [
            row.strip() for row in open(os.path.join(self.cfg.neg_sample_dir, 'all_captions.txt')) if row.strip() != ''
        ]

        self.pure_text_dataset = None
        self.pure_image_dataset = None
        self.detection_dataset = None
        if self.cfg.text_data is not None:
self.pure_text_dataset = FileDataset(self.cfg.text_data, self.cfg.text_selected_cols)
3
2023-10-20 20:01:42+00:00
16k
timapage/pyqt6-yolov8
main.py
[ { "identifier": "CameraCaptureThread", "path": "src/qt/stream/video_capture.py", "snippet": "class CameraCaptureThread(QThread):\n send_video_info = pyqtSignal(dict)\n send_frame = pyqtSignal(list)\n def __init__(self):\n super(CameraCaptureThread, self).__init__()\n self.thread_name = \"CameraCaptureThread\"\n self.threadFlag = False\n \n def set_start_config(self, video_source):\n self.threadFlag = True\n self.get_video_source(video_source)\n \n def get_video_source(self, video_source):\n self.video_source = video_source\n \n def get_video_info(self, video_cap):\n video_info = {}\n video_info[\"FPS\"] = video_cap.get(cv.CAP_PROP_FPS)\n video_info[\"length\"] = int(video_cap.get(cv.CAP_PROP_FRAME_COUNT))\n video_info[\"size\"] = (int(video_cap.get(cv.CAP_PROP_FRAME_WIDTH)),int(video_cap.get(cv.CAP_PROP_FRAME_HEIGHT)))\n return video_info\n \n def stop_capture(self):\n self.threadFlag = False\n\n def run(self): \n cap = cv.VideoCapture(self.video_source)\n if not cap.isOpened():\n raise IOError(\"Couldn't open webcam or video\")\n video_info = self.get_video_info(cap)\n self.send_video_info.emit(video_info)\n\n idx_frame = 0\n while self.threadFlag:\n ret, frame = cap.read()\n if ret is False or self.threadFlag is False:\n break\n self.send_frame.emit(list([idx_frame,frame]))\n idx_frame += 1\n self.send_frame.emit(list([None,None]))\n cap.release()" }, { "identifier": "VideoVisualizationThread", "path": "src/qt/stream/visualize.py", "snippet": "class VideoVisualizationThread(QThread):\n send_thread_start_stop_flag = pyqtSignal(str)\n send_displayable_frame = pyqtSignal(QImage)\n send_ai_output = pyqtSignal(list)\n def __init__(self):\n super(VideoVisualizationThread, self).__init__()\n self.thread_name = \"VideoVisualizationThread\"\n self.threadFlag = False\n \n def set_start_config(self, screen_size):\n self.threadFlag = True\n self.frame_buffer = FrameBuffer(10)\n self.ai_output = []\n self.get_screen_size(screen_size)\n \n def get_fresh_frame(self, frame_list):\n self.frame_buffer.put(frame=copy.deepcopy(frame_list[1]), frame_id=frame_list[0], realtime=True)\n\n def get_ai_output(self, ai_output):\n self.ai_output = copy.deepcopy(ai_output)\n \n def get_screen_size(self, screen_size):\n self.iw, self.ih = screen_size\n \n def stop_display(self):\n self.threadFlag = False\n\n def run(self):\n self.send_thread_start_stop_flag.emit(\"processing_on_camera\")\n while self.threadFlag:\n frame_id, frame = self.frame_buffer.get()\n if frame_id is not None:\n frame = draw_results(frame, self.ai_output)\n show_image = self.convert_cv_qt(frame, self.ih, self.iw)\n self.send_displayable_frame.emit(show_image)\n self.send_ai_output.emit(self.ai_output)\n else:\n break\n blank_image = np.zeros((self.ih, self.iw, 3))\n blank_image = cv.cvtColor(blank_image.astype('uint8'), cv.COLOR_BGR2RGBA)\n show_image = QImage(blank_image.data, blank_image.shape[1], blank_image.shape[0], QImage.Format.Format_RGBA8888)\n self.send_displayable_frame.emit(show_image)\n self.send_ai_output.emit([])\n self.send_thread_start_stop_flag.emit(\"waiting_for_setting\")\n\n\n def convert_cv_qt(self, image, screen_height, screen_width):\n h, w, _ = image.shape\n scale = min(screen_width / w, screen_height / h)\n nw, nh = int(scale * w), int(scale * h)\n image_resized = cv.resize(image, (nw, nh))\n image_paded = np.full(shape=[screen_height, screen_width, 3], fill_value=0)\n dw, dh = (screen_width - nw) // 2, (screen_height - nh) // 2\n image_paded[dh:nh + dh, dw:nw + dw, :] = image_resized\n image_paded = 
cv.cvtColor(image_paded.astype('uint8'), cv.COLOR_BGR2RGBA)\n return QImage(image_paded.data, image_paded.shape[1], image_paded.shape[0], QImage.Format.Format_RGBA8888)" }, { "identifier": "AiWorkerThread", "path": "src/qt/stream/ai_worker.py", "snippet": "class AiWorkerThread(QThread):\n send_ai_output = pyqtSignal(list)\n def __init__(self):\n super(AiWorkerThread, self).__init__()\n self.thread_name = \"AiWorkerThread\"\n self.threadFlag = False\n \n def set_start_config(self, ai_task, model_name=\"yolov8n\", confidence_threshold=0.35, iou_threshold=0.45):\n self.threadFlag = True\n self.ai_task = ai_task\n self.latest_frame = LatestFrame()\n self.confi_thr = confidence_threshold\n self.iou_thr = iou_threshold\n self.model_name = model_name\n self._init_yolo()\n self._init_tracker()\n\n def set_iou_threshold(self, iou_threshold):\n self.iou_thr = iou_threshold\n \n def set_confidence_threshold(self, confidence_threshold):\n self.confi_thr = confidence_threshold\n \n def set_model_name(self, model_name):\n self.model_name = model_name\n\n def _init_yolo(self):\n if self.ai_task == \"object_detection\":\n self.detector = YoloDetector()\n self.detector.init(\n model_path=os.path.join(ROOT, f\"weights/detection/{self.model_name}.onnx\"),\n class_txt_path=os.path.join(ROOT, \"weights/classes.txt\"),\n confidence_threshold=self.confi_thr,\n iou_threshold=self.iou_thr)\n elif self.ai_task == \"pose_detection\":\n self.pose_detector = PoseDetector()\n self.pose_detector.init(\n model_path=os.path.join(ROOT, f\"weights/pose/{self.model_name}-pose.onnx\"),\n confidence_threshold=self.confi_thr,\n iou_threshold=self.iou_thr)\n elif self.ai_task == \"segmentation\":\n self.seg_detector = YOLOSeg()\n self.seg_detector.init(\n model_path=os.path.join(ROOT, f\"weights/segmentation/{self.model_name}-seg.onnx\"),\n class_txt_path=os.path.join(ROOT, \"weights/classes.txt\"),\n confidence_threshold=self.confi_thr,\n iou_threshold=self.iou_thr)\n\n def _init_tracker(self):\n self.tracker = DeepSort(\n model_path=os.path.join(ROOT, f\"src/models/tracking/deep_sort/deep/checkpoint/ckpt.t7\"))\n \n def get_frame(self, frame_list):\n self.latest_frame.put(frame=frame_list[1], frame_id=frame_list[0], realtime=True)\n \n def stop_process(self):\n self.threadFlag = False\n \n def run(self):\n while self.threadFlag:\n frame_id, frame = self.latest_frame.get()\n if frame_id is None:\n break\n model_output = []\n if self.ai_task == \"object_detection\":\n model_output = self.detector.inference(frame, self.confi_thr, self.iou_thr)\n elif self.ai_task == \"pose_detection\":\n model_output = self.pose_detector.inference(frame, self.confi_thr, self.iou_thr)\n elif self.ai_task == \"segmentation\":\n model_output = self.seg_detector.inference(frame, self.confi_thr, self.iou_thr)\n\n model_output = self.tracker.update(\n detection_results=model_output,\n ori_img=frame)\n \n self.model_output = add_image_id(model_output, frame_id)\n self.send_ai_output.emit(model_output)" }, { "identifier": "Ui_MainWindow", "path": "src/ui/main_window.py", "snippet": "class Ui_MainWindow(object):\n def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(878, 617)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Preferred, QtWidgets.QSizePolicy.Policy.Minimum)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())\n MainWindow.setSizePolicy(sizePolicy)\n icon = QtGui.QIcon()\n 
icon.addPixmap(QtGui.QPixmap(\":/images/icons/icon.png\"), QtGui.QIcon.Mode.Normal, QtGui.QIcon.State.Off)\n MainWindow.setWindowIcon(icon)\n MainWindow.setStyleSheet(\"background-color: rgb(119, 118, 123);\\n\"\n\"border-color: rgb(119, 118, 123);\")\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setStyleSheet(\"background-color: rgb(119, 118, 123);\\n\"\n\"border-color: rgb(119, 118, 123);\")\n self.centralwidget.setObjectName(\"centralwidget\")\n self.horizontalLayout_4 = QtWidgets.QHBoxLayout(self.centralwidget)\n self.horizontalLayout_4.setObjectName(\"horizontalLayout_4\")\n self.verticalLayout = QtWidgets.QVBoxLayout()\n self.verticalLayout.setObjectName(\"verticalLayout\")\n self.horizontalLayout = QtWidgets.QHBoxLayout()\n self.horizontalLayout.setObjectName(\"horizontalLayout\")\n self.verticalLayout_2 = QtWidgets.QVBoxLayout()\n self.verticalLayout_2.setObjectName(\"verticalLayout_2\")\n self.groupBox = QtWidgets.QGroupBox(self.centralwidget)\n font = QtGui.QFont()\n font.setFamily(\"Ubuntu\")\n font.setPointSize(13)\n font.setBold(True)\n font.setWeight(75)\n self.groupBox.setFont(font)\n self.groupBox.setStyleSheet(\"\")\n self.groupBox.setObjectName(\"groupBox\")\n self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.groupBox)\n self.verticalLayout_4.setObjectName(\"verticalLayout_4\")\n self.radioButton_det = QtWidgets.QRadioButton(self.groupBox)\n font = QtGui.QFont()\n font.setPointSize(13)\n font.setBold(True)\n font.setWeight(75)\n self.radioButton_det.setFont(font)\n self.radioButton_det.setStyleSheet(\"QRadioButton\\n\"\n\"{font-size: 16px;\\n\"\n\" font-weight: bold;\\n\"\n\" border-radius:9px;\\n\"\n\" background:rgba(66, 195, 255, 0);\\n\"\n\"color: rgb(218, 218, 218);;}\\n\"\n\"QRadioButton::indicator {\\n\"\n\" width: 20px;\\n\"\n\" height: 20px;\\n\"\n\"}\\n\"\n\"\\n\"\n\"QRadioButton::indicator:unchecked {\\n\"\n\" image: url(:/images/icons/button-off.png);\\n\"\n\"}\\n\"\n\"\\n\"\n\"QRadioButton::indicator:checked {\\n\"\n\" \\n\"\n\" image: url(:/images/icons/button-on.png);\\n\"\n\"}\\n\"\n\"\\n\"\n\"QRadioButton::disabled{\\n\"\n\" color: rgb(0, 0, 0);\\n\"\n\"}\\n\"\n\"\")\n self.radioButton_det.setLocale(QtCore.QLocale(QtCore.QLocale.Language.English, QtCore.QLocale.Country.Zimbabwe))\n self.radioButton_det.setChecked(True)\n self.radioButton_det.setObjectName(\"radioButton_det\")\n self.verticalLayout_4.addWidget(self.radioButton_det)\n self.radioButton_seg = QtWidgets.QRadioButton(self.groupBox)\n self.radioButton_seg.setStyleSheet(\"QRadioButton\\n\"\n\"{font-size: 16px;\\n\"\n\" font-weight: bold;\\n\"\n\" border-radius:9px;\\n\"\n\" background:rgba(66, 195, 255, 0);\\n\"\n\"color: rgb(218, 218, 218);;}\\n\"\n\"\\n\"\n\"QRadioButton::indicator {\\n\"\n\" width: 20px;\\n\"\n\" height: 20px;\\n\"\n\"}\\n\"\n\"\\n\"\n\"QRadioButton::indicator:unchecked {\\n\"\n\" image: url(:/images/icons/button-off.png);\\n\"\n\"}\\n\"\n\"\\n\"\n\"QRadioButton::indicator:checked {\\n\"\n\" \\n\"\n\" image: url(:/images/icons/button-on.png);\\n\"\n\"}\\n\"\n\"\\n\"\n\"QRadioButton::disabled{\\n\"\n\" color: rgb(0, 0, 0);\\n\"\n\"}\")\n self.radioButton_seg.setObjectName(\"radioButton_seg\")\n self.verticalLayout_4.addWidget(self.radioButton_seg)\n self.radioButton_pose = QtWidgets.QRadioButton(self.groupBox)\n self.radioButton_pose.setStyleSheet(\"QRadioButton\\n\"\n\"{font-size: 16px;\\n\"\n\" font-weight: bold;\\n\"\n\" border-radius:9px;\\n\"\n\" background:rgba(66, 195, 255, 0);\\n\"\n\"color: rgb(218, 218, 
218);;}\\n\"\n\"\\n\"\n\"QRadioButton::indicator {\\n\"\n\" width: 20px;\\n\"\n\" height: 20px;\\n\"\n\"}\\n\"\n\"\\n\"\n\"QRadioButton::indicator:unchecked {\\n\"\n\" image: url(:/images/icons/button-off.png);\\n\"\n\"}\\n\"\n\"\\n\"\n\"QRadioButton::indicator:checked {\\n\"\n\" \\n\"\n\" image: url(:/images/icons/button-on.png);\\n\"\n\"}\\n\"\n\"\\n\"\n\"QRadioButton::disabled{\\n\"\n\"color: rgb(0, 0, 0);\\n\"\n\"}\")\n self.radioButton_pose.setObjectName(\"radioButton_pose\")\n self.verticalLayout_4.addWidget(self.radioButton_pose)\n self.verticalLayout_2.addWidget(self.groupBox)\n self.groupBox_2 = QtWidgets.QGroupBox(self.centralwidget)\n font = QtGui.QFont()\n font.setPointSize(13)\n font.setBold(True)\n font.setWeight(75)\n self.groupBox_2.setFont(font)\n self.groupBox_2.setObjectName(\"groupBox_2\")\n self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.groupBox_2)\n self.horizontalLayout_2.setObjectName(\"horizontalLayout_2\")\n self.comboBox_model = QtWidgets.QComboBox(self.groupBox_2)\n self.comboBox_model.setAutoFillBackground(False)\n self.comboBox_model.setStyleSheet(\"QComboBox QAbstractItemView {\\n\"\n\"font-size: 16px;\\n\"\n\"outline:none;\\n\"\n\"border:none;}\\n\"\n\"\\n\"\n\"QComboBox{\\n\"\n\"font-size: 16px;\\n\"\n\"\\n\"\n\"color: rgb(218, 218, 218);\\n\"\n\"border-width:0px;\\n\"\n\"border-color:white;\\n\"\n\"border-style:solid;\\n\"\n\"background-color: rgba(200, 200, 200,50);}\\n\"\n\"\\n\"\n\"QComboBox::drop-down {\\n\"\n\"margin-top:1;\\n\"\n\"height:20;\\n\"\n\"color: rgb(218, 218, 218);\\n\"\n\"background-color: rgba(200, 200, 200,50);\\n\"\n\"border-image: url(:/images/icons/roll_down.png);\\n\"\n\"}\\n\"\n\"\\n\"\n\"QComboBox::disabled{\\n\"\n\"color: rgb(0, 0, 0);\\n\"\n\"}\\n\"\n\"\")\n self.comboBox_model.setCurrentText(\"YOLOv8n\")\n self.comboBox_model.setObjectName(\"comboBox_model\")\n self.comboBox_model.addItem(\"\")\n self.comboBox_model.addItem(\"\")\n self.comboBox_model.addItem(\"\")\n self.comboBox_model.addItem(\"\")\n self.comboBox_model.addItem(\"\")\n self.horizontalLayout_2.addWidget(self.comboBox_model)\n self.verticalLayout_2.addWidget(self.groupBox_2)\n self.groupBox_3 = QtWidgets.QGroupBox(self.centralwidget)\n font = QtGui.QFont()\n font.setPointSize(13)\n font.setBold(True)\n font.setWeight(75)\n self.groupBox_3.setFont(font)\n self.groupBox_3.setObjectName(\"groupBox_3\")\n self.horizontalLayout_3 = QtWidgets.QHBoxLayout(self.groupBox_3)\n self.horizontalLayout_3.setObjectName(\"horizontalLayout_3\")\n self.pushButton_file = QtWidgets.QPushButton(self.groupBox_3)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Expanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.pushButton_file.sizePolicy().hasHeightForWidth())\n self.pushButton_file.setSizePolicy(sizePolicy)\n self.pushButton_file.setStyleSheet(\"QPushButton{\\n\"\n\" image: url(:/images/icons/video.png);\\n\"\n\"font-size: 14px;\\n\"\n\"font-weight: bold;\\n\"\n\"color:white;\\n\"\n\"text-align: center center;\\n\"\n\"padding-left: 5px;\\n\"\n\"padding-right: 5px;\\n\"\n\"padding-top: 4px;\\n\"\n\"padding-bottom: 4px;\\n\"\n\"border-style: solid;\\n\"\n\"border-width: 0px;\\n\"\n\"border-color: rgba(255, 255, 255, 255);\\n\"\n\"border-radius: 3px;\\n\"\n\"background-color: rgba(200, 200, 200,0);}\\n\"\n\"\\n\"\n\"QPushButton:focus{outline: none;}\\n\"\n\"\\n\"\n\"QPushButton::pressed{\\n\"\n\" font-size: 14px;\\n\"\n\" font-weight: bold;\\n\"\n\" 
color:rgb(200,200,200);\\n\"\n\" text-align: center center;\\n\"\n\" padding-left: 5px;\\n\"\n\" padding-right: 5px;\\n\"\n\" padding-top: 4px;\\n\"\n\" padding-bottom: 4px;\\n\"\n\" border-style: solid;\\n\"\n\" border-width: 0px;\\n\"\n\" border-color: rgba(255, 255, 255, 255);\\n\"\n\" border-radius: 3px;\\n\"\n\" background-color: #bf513b;}\\n\"\n\"\\n\"\n\"QPushButton::disabled{\\n\"\n\" image: url(:/images/icons/video_off.png);\\n\"\n\" font-size: 14px;\\n\"\n\" font-weight: bold;\\n\"\n\" color:rgb(200,200,200);\\n\"\n\" text-align: center center;\\n\"\n\" padding-left: 5px;\\n\"\n\" padding-right: 5px;\\n\"\n\" padding-top: 4px;\\n\"\n\" padding-bottom: 4px;\\n\"\n\" border-style: solid;\\n\"\n\" border-width: 0px;\\n\"\n\" border-color: rgba(255, 255, 255, 255);\\n\"\n\" border-radius: 3px;}\\n\"\n\"QPushButton::hover {\\n\"\n\"border-style: solid;\\n\"\n\"border-width: 0px;\\n\"\n\"border-radius: 0px;\\n\"\n\"background-color: rgba(48,148,243,80);}\")\n self.pushButton_file.setText(\"\")\n self.pushButton_file.setObjectName(\"pushButton_file\")\n self.horizontalLayout_3.addWidget(self.pushButton_file)\n self.pushButton_cam = QtWidgets.QPushButton(self.groupBox_3)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Expanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.pushButton_cam.sizePolicy().hasHeightForWidth())\n self.pushButton_cam.setSizePolicy(sizePolicy)\n self.pushButton_cam.setStyleSheet(\"QPushButton{\\n\"\n\" image: url(:/images/icons/camera_on.png);\\n\"\n\"font-size: 14px;\\n\"\n\"font-weight: bold;\\n\"\n\"color:white;\\n\"\n\"text-align: center center;\\n\"\n\"padding-left: 5px;\\n\"\n\"padding-right: 5px;\\n\"\n\"padding-top: 4px;\\n\"\n\"padding-bottom: 4px;\\n\"\n\"border-style: solid;\\n\"\n\"border-width: 0px;\\n\"\n\"border-color: rgba(255, 255, 255, 255);\\n\"\n\"border-radius: 3px;\\n\"\n\"background-color: rgba(200, 200, 200,0);}\\n\"\n\"\\n\"\n\"QPushButton:focus{outline: none;}\\n\"\n\"\\n\"\n\"QPushButton::pressed{\\n\"\n\" font-size: 14px;\\n\"\n\" font-weight: bold;\\n\"\n\" color:rgb(200,200,200);\\n\"\n\" text-align: center center;\\n\"\n\" padding-left: 5px;\\n\"\n\" padding-right: 5px;\\n\"\n\" padding-top: 4px;\\n\"\n\" padding-bottom: 4px;\\n\"\n\" border-style: solid;\\n\"\n\" border-width: 0px;\\n\"\n\" border-color: rgba(255, 255, 255, 255);\\n\"\n\" border-radius: 3px;\\n\"\n\" background-color: #bf513b;}\\n\"\n\"\\n\"\n\"QPushButton::disabled{\\n\"\n\" image: url(:/images/icons/camera_off.png);\\n\"\n\" font-size: 14px;\\n\"\n\" font-weight: bold;\\n\"\n\" color:rgb(200,200,200);\\n\"\n\" text-align: center center;\\n\"\n\" padding-left: 5px;\\n\"\n\" padding-right: 5px;\\n\"\n\" padding-top: 4px;\\n\"\n\" padding-bottom: 4px;\\n\"\n\" border-style: solid;\\n\"\n\" border-width: 0px;\\n\"\n\" border-color: rgba(255, 255, 255, 255);\\n\"\n\" border-radius: 3px;}\\n\"\n\"QPushButton::hover {\\n\"\n\"border-style: solid;\\n\"\n\"border-width: 0px;\\n\"\n\"border-radius: 0px;\\n\"\n\"background-color: rgba(48,148,243,80);}url(:/images/icons/camera_on.png)\")\n self.pushButton_cam.setText(\"\")\n self.pushButton_cam.setObjectName(\"pushButton_cam\")\n self.horizontalLayout_3.addWidget(self.pushButton_cam)\n self.verticalLayout_2.addWidget(self.groupBox_3)\n self.groupBox_4 = QtWidgets.QGroupBox(self.centralwidget)\n font = QtGui.QFont()\n font.setPointSize(13)\n font.setBold(True)\n font.setWeight(75)\n 
self.groupBox_4.setFont(font)\n self.groupBox_4.setObjectName(\"groupBox_4\")\n self.horizontalLayout_5 = QtWidgets.QHBoxLayout(self.groupBox_4)\n self.horizontalLayout_5.setObjectName(\"horizontalLayout_5\")\n self.doubleSpinBox_conf = QtWidgets.QDoubleSpinBox(self.groupBox_4)\n self.doubleSpinBox_conf.setStyleSheet(\"QDoubleSpinBox{\\n\"\n\"background:rgba(200, 200, 200,50);\\n\"\n\"color:white;\\n\"\n\"font-size: 14px;\\n\"\n\"border-style: solid;\\n\"\n\"border-width: 1px;\\n\"\n\"border-color: rgba(200, 200, 200,100);\\n\"\n\"border-radius: 3px;}\\n\"\n\"\\n\"\n\"QDoubleSpinBox::down-button{\\n\"\n\"background:rgba(200, 200, 200,0);\\n\"\n\"border-image: url(:/images/icons/botton_down.png);}\\n\"\n\"QDoubleSpinBox::down-button::hover{\\n\"\n\"background:rgba(200, 200, 200,100);\\n\"\n\"border-image: url(:/images/icons/botton_down.png);}\\n\"\n\"\\n\"\n\"QDoubleSpinBox::up-button{\\n\"\n\"background:rgba(200, 200, 200,0);\\n\"\n\"border-image: url(:/images/icons/botton_up.png);}\\n\"\n\"QDoubleSpinBox::up-button::hover{\\n\"\n\"background:rgba(200, 200, 200,100);\\n\"\n\"border-image: url(:/images/icons/botton_up.png);}\")\n self.doubleSpinBox_conf.setMaximum(1.0)\n self.doubleSpinBox_conf.setSingleStep(0.01)\n self.doubleSpinBox_conf.setStepType(QtWidgets.QAbstractSpinBox.StepType.AdaptiveDecimalStepType)\n self.doubleSpinBox_conf.setProperty(\"value\", 0.3)\n self.doubleSpinBox_conf.setObjectName(\"doubleSpinBox_conf\")\n self.horizontalLayout_5.addWidget(self.doubleSpinBox_conf)\n self.horizontalSlider_conf = QtWidgets.QSlider(self.groupBox_4)\n self.horizontalSlider_conf.setStyleSheet(\"QSlider{\\n\"\n\"border-color: #bcbcbc;\\n\"\n\"color:#d9d9d9;\\n\"\n\"}\\n\"\n\"QSlider::groove:horizontal { \\n\"\n\" border: 1px solid #999999; \\n\"\n\" height: 3px; \\n\"\n\" margin: 0px 0; \\n\"\n\" left: 5px; right: 5px; \\n\"\n\" }\\n\"\n\"QSlider::handle:horizontal { \\n\"\n\" border: 0px ; \\n\"\n\" border-image: url(:/images/icons/point.png);\\n\"\n\" width:15px;\\n\"\n\" margin: -7px -7px -7px -7px; \\n\"\n\"} \\n\"\n\"QSlider::add-page:horizontal{\\n\"\n\"background: #d9d9d9; \\n\"\n\"\\n\"\n\"}\\n\"\n\"QSlider::sub-page:horizontal{ \\n\"\n\" background: #373737; \\n\"\n\"}\")\n self.horizontalSlider_conf.setMaximum(99)\n self.horizontalSlider_conf.setSingleStep(1)\n self.horizontalSlider_conf.setPageStep(99)\n self.horizontalSlider_conf.setProperty(\"value\", 30)\n self.horizontalSlider_conf.setOrientation(QtCore.Qt.Orientation.Horizontal)\n self.horizontalSlider_conf.setObjectName(\"horizontalSlider_conf\")\n self.horizontalLayout_5.addWidget(self.horizontalSlider_conf)\n self.verticalLayout_2.addWidget(self.groupBox_4)\n self.groupBox_5 = QtWidgets.QGroupBox(self.centralwidget)\n font = QtGui.QFont()\n font.setPointSize(13)\n font.setBold(True)\n font.setWeight(75)\n self.groupBox_5.setFont(font)\n self.groupBox_5.setObjectName(\"groupBox_5\")\n self.horizontalLayout_6 = QtWidgets.QHBoxLayout(self.groupBox_5)\n self.horizontalLayout_6.setObjectName(\"horizontalLayout_6\")\n self.doubleSpinBox_iou = QtWidgets.QDoubleSpinBox(self.groupBox_5)\n self.doubleSpinBox_iou.setStyleSheet(\"QDoubleSpinBox{\\n\"\n\"background:rgba(200, 200, 200,50);\\n\"\n\"color:white;\\n\"\n\"font-size: 14px;\\n\"\n\"border-style: solid;\\n\"\n\"border-width: 1px;\\n\"\n\"border-color: rgba(200, 200, 200,100);\\n\"\n\"border-radius: 3px;}\\n\"\n\"\\n\"\n\"QDoubleSpinBox::down-button{\\n\"\n\"background:rgba(200, 200, 200,0);\\n\"\n\"border-image: 
url(:/images/icons/botton_down.png);}\\n\"\n\"QDoubleSpinBox::down-button::hover{\\n\"\n\"background:rgba(200, 200, 200,100);\\n\"\n\"border-image: url(:/images/icons/botton_down.png);}\\n\"\n\"\\n\"\n\"QDoubleSpinBox::up-button{\\n\"\n\"background:rgba(200, 200, 200,0);\\n\"\n\"border-image: url(:/images/icons/botton_up.png);}\\n\"\n\"QDoubleSpinBox::up-button::hover{\\n\"\n\"background:rgba(200, 200, 200,100);\\n\"\n\"border-image: url(:/images/icons/botton_up.png);}\")\n self.doubleSpinBox_iou.setMaximum(1.0)\n self.doubleSpinBox_iou.setSingleStep(0.01)\n self.doubleSpinBox_iou.setStepType(QtWidgets.QAbstractSpinBox.StepType.AdaptiveDecimalStepType)\n self.doubleSpinBox_iou.setProperty(\"value\", 0.45)\n self.doubleSpinBox_iou.setObjectName(\"doubleSpinBox_iou\")\n self.horizontalLayout_6.addWidget(self.doubleSpinBox_iou)\n self.horizontalSlider_iou = QtWidgets.QSlider(self.groupBox_5)\n self.horizontalSlider_iou.setStyleSheet(\"QSlider{\\n\"\n\"border-color: #bcbcbc;\\n\"\n\"color:#d9d9d9;\\n\"\n\"}\\n\"\n\"QSlider::groove:horizontal { \\n\"\n\" border: 1px solid #999999; \\n\"\n\" height: 3px; \\n\"\n\" margin: 0px 0; \\n\"\n\" left: 5px; right: 5px; \\n\"\n\" }\\n\"\n\"QSlider::handle:horizontal { \\n\"\n\" border: 0px ; \\n\"\n\" border-image: url(:/images/icons/point.png);\\n\"\n\" width:15px;\\n\"\n\" margin: -7px -7px -7px -7px; \\n\"\n\"} \\n\"\n\"QSlider::add-page:horizontal{\\n\"\n\"background: #d9d9d9; \\n\"\n\"\\n\"\n\"}\\n\"\n\"QSlider::sub-page:horizontal{ \\n\"\n\" background: #373737; \\n\"\n\"}\")\n self.horizontalSlider_iou.setProperty(\"value\", 45)\n self.horizontalSlider_iou.setOrientation(QtCore.Qt.Orientation.Horizontal)\n self.horizontalSlider_iou.setObjectName(\"horizontalSlider_iou\")\n self.horizontalLayout_6.addWidget(self.horizontalSlider_iou)\n self.verticalLayout_2.addWidget(self.groupBox_5)\n self.groupBox_6 = QtWidgets.QGroupBox(self.centralwidget)\n font = QtGui.QFont()\n font.setPointSize(13)\n font.setBold(True)\n font.setWeight(75)\n self.groupBox_6.setFont(font)\n self.groupBox_6.setObjectName(\"groupBox_6\")\n self.horizontalLayout_7 = QtWidgets.QHBoxLayout(self.groupBox_6)\n self.horizontalLayout_7.setObjectName(\"horizontalLayout_7\")\n self.doubleSpinBox_interval = QtWidgets.QDoubleSpinBox(self.groupBox_6)\n self.doubleSpinBox_interval.setStyleSheet(\"QDoubleSpinBox{\\n\"\n\"background:rgba(200, 200, 200,50);\\n\"\n\"color:white;\\n\"\n\"font-size: 14px;\\n\"\n\"border-style: solid;\\n\"\n\"border-width: 1px;\\n\"\n\"border-color: rgba(200, 200, 200,100);\\n\"\n\"border-radius: 3px;}\\n\"\n\"\\n\"\n\"QDoubleSpinBox::down-button{\\n\"\n\"background:rgba(200, 200, 200,0);\\n\"\n\"border-image: url(:/images/icons/botton_down.png);}\\n\"\n\"QDoubleSpinBox::down-button::hover{\\n\"\n\"background:rgba(200, 200, 200,100);\\n\"\n\"border-image: url(:/images/icons/botton_down.png);}\\n\"\n\"\\n\"\n\"QDoubleSpinBox::up-button{\\n\"\n\"background:rgba(200, 200, 200,0);\\n\"\n\"border-image: url(:/images/icons/botton_up.png);}\\n\"\n\"QDoubleSpinBox::up-button::hover{\\n\"\n\"background:rgba(200, 200, 200,100);\\n\"\n\"border-image: url(:/images/icons/botton_up.png);}\")\n self.doubleSpinBox_interval.setDecimals(0)\n self.doubleSpinBox_interval.setMaximum(10.0)\n self.doubleSpinBox_interval.setObjectName(\"doubleSpinBox_interval\")\n self.horizontalLayout_7.addWidget(self.doubleSpinBox_interval)\n self.horizontalSlider_interval = QtWidgets.QSlider(self.groupBox_6)\n self.horizontalSlider_interval.setStyleSheet(\"QSlider{\\n\"\n\"border-color: 
#bcbcbc;\\n\"\n\"color:#d9d9d9;\\n\"\n\"}\\n\"\n\"QSlider::groove:horizontal { \\n\"\n\" border: 1px solid #999999; \\n\"\n\" height: 3px; \\n\"\n\" margin: 0px 0; \\n\"\n\" left: 5px; right: 5px; \\n\"\n\" }\\n\"\n\"QSlider::handle:horizontal { \\n\"\n\" border: 0px ; \\n\"\n\" border-image: url(:/images/icons/point.png);\\n\"\n\" width:15px;\\n\"\n\" margin: -7px -7px -7px -7px; \\n\"\n\"} \\n\"\n\"QSlider::add-page:horizontal{\\n\"\n\"background: #d9d9d9; \\n\"\n\"\\n\"\n\"}\\n\"\n\"QSlider::sub-page:horizontal{ \\n\"\n\" background: #373737; \\n\"\n\"}\")\n self.horizontalSlider_interval.setMaximum(10)\n self.horizontalSlider_interval.setPageStep(1)\n self.horizontalSlider_interval.setOrientation(QtCore.Qt.Orientation.Horizontal)\n self.horizontalSlider_interval.setObjectName(\"horizontalSlider_interval\")\n self.horizontalLayout_7.addWidget(self.horizontalSlider_interval)\n self.verticalLayout_2.addWidget(self.groupBox_6)\n self.verticalLayout_2.setStretch(0, 3)\n self.verticalLayout_2.setStretch(1, 1)\n self.verticalLayout_2.setStretch(2, 2)\n self.verticalLayout_2.setStretch(3, 1)\n self.verticalLayout_2.setStretch(4, 1)\n self.verticalLayout_2.setStretch(5, 1)\n self.horizontalLayout.addLayout(self.verticalLayout_2)\n self.verticalLayout_3 = QtWidgets.QVBoxLayout()\n self.verticalLayout_3.setObjectName(\"verticalLayout_3\")\n self.label_display = QtWidgets.QLabel(self.centralwidget)\n self.label_display.setStyleSheet(\"background-color: rgb(0, 0, 0);\")\n self.label_display.setText(\"\")\n self.label_display.setObjectName(\"label_display\")\n self.verticalLayout_3.addWidget(self.label_display)\n self.horizontalLayout_8 = QtWidgets.QHBoxLayout()\n self.horizontalLayout_8.setObjectName(\"horizontalLayout_8\")\n self.pushButton_play = QtWidgets.QPushButton(self.centralwidget)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Minimum, QtWidgets.QSizePolicy.Policy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.pushButton_play.sizePolicy().hasHeightForWidth())\n self.pushButton_play.setSizePolicy(sizePolicy)\n self.pushButton_play.setMinimumSize(QtCore.QSize(40, 40))\n self.pushButton_play.setStyleSheet(\"QPushButton {\\n\"\n\"border-style: solid;\\n\"\n\"border-width: 0px;\\n\"\n\"border-radius: 0px;\\n\"\n\"background-color: rgba(223, 223, 223, 0);\\n\"\n\"}\\n\"\n\"QPushButton::focus{outline: none;}\\n\"\n\"QPushButton::hover {\\n\"\n\"border-style: solid;\\n\"\n\"border-width: 0px;\\n\"\n\"border-radius: 0px;\\n\"\n\"background-color: rgba(223, 223, 223, 150);\\n\"\n\"}\")\n self.pushButton_play.setText(\"\")\n icon1 = QtGui.QIcon()\n icon1.addPixmap(QtGui.QPixmap(\":/images/icons/pause.png\"), QtGui.QIcon.Mode.Normal, QtGui.QIcon.State.Off)\n icon1.addPixmap(QtGui.QPixmap(\":/images/icons/run.png\"), QtGui.QIcon.Mode.Normal, QtGui.QIcon.State.On)\n icon1.addPixmap(QtGui.QPixmap(\":/images/icons/pause.png\"), QtGui.QIcon.Mode.Disabled, QtGui.QIcon.State.Off)\n icon1.addPixmap(QtGui.QPixmap(\":/images/icons/run.png\"), QtGui.QIcon.Mode.Disabled, QtGui.QIcon.State.On)\n icon1.addPixmap(QtGui.QPixmap(\":/images/icons/pause.png\"), QtGui.QIcon.Mode.Active, QtGui.QIcon.State.Off)\n icon1.addPixmap(QtGui.QPixmap(\":/images/icons/run.png\"), QtGui.QIcon.Mode.Active, QtGui.QIcon.State.On)\n icon1.addPixmap(QtGui.QPixmap(\":/images/icons/pause.png\"), QtGui.QIcon.Mode.Selected, QtGui.QIcon.State.Off)\n icon1.addPixmap(QtGui.QPixmap(\":/images/icons/run.png\"), QtGui.QIcon.Mode.Selected, QtGui.QIcon.State.On)\n 
self.pushButton_play.setIcon(icon1)\n self.pushButton_play.setIconSize(QtCore.QSize(30, 30))\n self.pushButton_play.setCheckable(True)\n self.pushButton_play.setObjectName(\"pushButton_play\")\n self.horizontalLayout_8.addWidget(self.pushButton_play)\n self.progressBar_play = QtWidgets.QProgressBar(self.centralwidget)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.progressBar_play.sizePolicy().hasHeightForWidth())\n self.progressBar_play.setSizePolicy(sizePolicy)\n self.progressBar_play.setMinimumSize(QtCore.QSize(0, 0))\n self.progressBar_play.setStyleSheet(\"QProgressBar{ \\n\"\n\"color: rgb(255, 255, 255); \\n\"\n\"font:12pt;\\n\"\n\" border-radius:2px; \\n\"\n\"text-align:center; \\n\"\n\"border:none; \\n\"\n\"background-color: rgba(215, 215, 215,100);} \\n\"\n\"\\n\"\n\"QProgressBar:chunk{ \\n\"\n\"border-radius:0px; \\n\"\n\"background: rgba(55, 55, 55, 200);}\")\n self.progressBar_play.setMaximum(1000)\n self.progressBar_play.setProperty(\"value\", 0)\n self.progressBar_play.setFormat(\"\")\n self.progressBar_play.setObjectName(\"progressBar_play\")\n self.horizontalLayout_8.addWidget(self.progressBar_play)\n self.pushButton_stop = QtWidgets.QPushButton(self.centralwidget)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Minimum, QtWidgets.QSizePolicy.Policy.Fixed)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.pushButton_stop.sizePolicy().hasHeightForWidth())\n self.pushButton_stop.setSizePolicy(sizePolicy)\n self.pushButton_stop.setMinimumSize(QtCore.QSize(40, 40))\n self.pushButton_stop.setStyleSheet(\"QPushButton {\\n\"\n\"border-style: solid;\\n\"\n\"border-width: 0px;\\n\"\n\"border-radius: 0px;\\n\"\n\"background-color: rgba(223, 223, 223, 0);\\n\"\n\"}\\n\"\n\"QPushButton::focus{outline: none;}\\n\"\n\"QPushButton::hover {\\n\"\n\"border-style: solid;\\n\"\n\"border-width: 0px;\\n\"\n\"border-radius: 0px;\\n\"\n\"background-color: rgba(223, 223, 223, 150);}\")\n self.pushButton_stop.setText(\"\")\n icon2 = QtGui.QIcon()\n icon2.addPixmap(QtGui.QPixmap(\":/images/icons/stop.png\"), QtGui.QIcon.Mode.Normal, QtGui.QIcon.State.Off)\n self.pushButton_stop.setIcon(icon2)\n self.pushButton_stop.setIconSize(QtCore.QSize(30, 30))\n self.pushButton_stop.setObjectName(\"pushButton_stop\")\n self.horizontalLayout_8.addWidget(self.pushButton_stop)\n self.horizontalLayout_8.setStretch(0, 1)\n self.horizontalLayout_8.setStretch(1, 12)\n self.horizontalLayout_8.setStretch(2, 1)\n self.verticalLayout_3.addLayout(self.horizontalLayout_8)\n self.tableWidget_results = QtWidgets.QTableWidget(self.centralwidget)\n sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Expanding)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.tableWidget_results.sizePolicy().hasHeightForWidth())\n self.tableWidget_results.setSizePolicy(sizePolicy)\n font = QtGui.QFont()\n font.setFamily(\"Ubuntu\")\n font.setPointSize(11)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.tableWidget_results.setFont(font)\n self.tableWidget_results.setAutoFillBackground(True)\n self.tableWidget_results.setStyleSheet(\"\")\n self.tableWidget_results.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarPolicy.ScrollBarAsNeeded)\n 
self.tableWidget_results.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.SizeAdjustPolicy.AdjustToContents)\n self.tableWidget_results.setObjectName(\"tableWidget_results\")\n self.tableWidget_results.setColumnCount(4)\n self.tableWidget_results.setRowCount(0)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setPointSize(12)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget_results.setHorizontalHeaderItem(0, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setPointSize(12)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget_results.setHorizontalHeaderItem(1, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setPointSize(12)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget_results.setHorizontalHeaderItem(2, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setPointSize(12)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget_results.setHorizontalHeaderItem(3, item)\n self.tableWidget_results.horizontalHeader().setCascadingSectionResizes(True)\n self.tableWidget_results.horizontalHeader().setSortIndicatorShown(False)\n self.tableWidget_results.horizontalHeader().setStretchLastSection(True)\n self.verticalLayout_3.addWidget(self.tableWidget_results)\n self.verticalLayout_3.setStretch(0, 15)\n self.verticalLayout_3.setStretch(1, 1)\n self.verticalLayout_3.setStretch(2, 4)\n self.horizontalLayout.addLayout(self.verticalLayout_3)\n self.horizontalLayout.setStretch(0, 2)\n self.horizontalLayout.setStretch(1, 12)\n self.verticalLayout.addLayout(self.horizontalLayout)\n self.label_status = QtWidgets.QLabel(self.centralwidget)\n self.label_status.setStyleSheet(\"QLabel\\n\"\n\"{\\n\"\n\" font-size: 16px;\\n\"\n\" font-weight: light;\\n\"\n\" border-radius:9px;\\n\"\n\" background:rgba(66, 195, 255, 0);\\n\"\n\"color: rgb(218, 218, 218);\\n\"\n\"}\\n\"\n\"\")\n self.label_status.setText(\"\")\n self.label_status.setObjectName(\"label_status\")\n self.verticalLayout.addWidget(self.label_status)\n self.verticalLayout.setStretch(0, 9)\n self.horizontalLayout_4.addLayout(self.verticalLayout)\n MainWindow.setCentralWidget(self.centralwidget)\n\n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"YOLOv8 GUI\"))\n self.groupBox.setTitle(_translate(\"MainWindow\", \"Tasks\"))\n self.radioButton_det.setText(_translate(\"MainWindow\", \"Detection\"))\n self.radioButton_seg.setText(_translate(\"MainWindow\", \"Segmentation\"))\n self.radioButton_pose.setText(_translate(\"MainWindow\", \"Pose Estimation\"))\n self.groupBox_2.setTitle(_translate(\"MainWindow\", \"Models\"))\n self.comboBox_model.setItemText(0, _translate(\"MainWindow\", \"YOLOv8n\"))\n self.comboBox_model.setItemText(1, _translate(\"MainWindow\", \"YOLOv8s\"))\n self.comboBox_model.setItemText(2, _translate(\"MainWindow\", \"YOLOv8m\"))\n self.comboBox_model.setItemText(3, _translate(\"MainWindow\", \"YOLOv8l\"))\n self.comboBox_model.setItemText(4, _translate(\"MainWindow\", \"YOLOv8x\"))\n self.groupBox_3.setTitle(_translate(\"MainWindow\", \"Inputs\"))\n self.groupBox_4.setTitle(_translate(\"MainWindow\", \"Confidence\"))\n self.groupBox_5.setTitle(_translate(\"MainWindow\", \"IoU\"))\n self.groupBox_6.setTitle(_translate(\"MainWindow\", \"Frame 
Interval\"))\n item = self.tableWidget_results.horizontalHeaderItem(0)\n item.setText(_translate(\"MainWindow\", \"ID\"))\n item = self.tableWidget_results.horizontalHeaderItem(1)\n item.setText(_translate(\"MainWindow\", \"Class\"))\n item = self.tableWidget_results.horizontalHeaderItem(2)\n item.setText(_translate(\"MainWindow\", \"Confidence\"))\n item = self.tableWidget_results.horizontalHeaderItem(3)\n item.setText(_translate(\"MainWindow\", \"BBox\"))" }, { "identifier": "FileProcessThread", "path": "src/qt/video/video_worker.py", "snippet": "class FileProcessThread(QThread):\n send_thread_start_finish_flag = pyqtSignal(str)\n send_video_info = pyqtSignal(dict)\n send_ai_output = pyqtSignal(list)\n send_display_frame = pyqtSignal(QImage)\n send_play_progress = pyqtSignal(int)\n def __init__(self):\n super(FileProcessThread, self).__init__()\n self.thread_name = \"FileProcessThread\"\n self.threadFlag = False\n \n def set_start_config(self, video_path, ai_task, screen_size, model_name=\"yolov8n\", confidence_threshold=0.35, iou_threshold=0.45, frame_interval=0):\n self.threadFlag = True\n self.video_path = video_path\n self.ai_task = ai_task\n self.pause_process = False\n self.confi_thr = confidence_threshold\n self.iou_thr = iou_threshold\n self.model_name = model_name\n self.frame_interval = frame_interval\n self.get_screen_size(screen_size)\n self._init_yolo()\n self._init_tracker()\n\n def set_iou_threshold(self, iou_threshold):\n self.iou_thr = iou_threshold\n \n def set_confidence_threshold(self, confidence_threshold):\n self.confi_thr = confidence_threshold\n \n def set_model_name(self, model_name):\n self.model_name = model_name\n \n def set_frame_interval(self, frame_interval):\n self.frame_interval = frame_interval\n \n def get_screen_size(self, screen_size):\n self.iw, self.ih = screen_size\n\n def _init_yolo(self):\n if self.ai_task == \"object_detection\":\n self.detector = YoloDetector()\n self.detector.init(\n model_path=os.path.join(ROOT, f\"weights/detection/{self.model_name}.onnx\"),\n class_txt_path=os.path.join(ROOT, \"weights/classes.txt\"),\n confidence_threshold=self.confi_thr,\n iou_threshold=self.iou_thr)\n elif self.ai_task == \"pose_detection\":\n self.pose_detector = PoseDetector()\n self.pose_detector.init(\n model_path=os.path.join(ROOT, f\"weights/pose/{self.model_name}-pose.onnx\"),\n confidence_threshold=self.confi_thr,\n iou_threshold=self.iou_thr)\n elif self.ai_task == \"segmentation\":\n self.seg_detector = YOLOSeg()\n self.seg_detector.init(\n model_path=os.path.join(ROOT, f\"weights/segmentation/{self.model_name}-seg.onnx\"),\n class_txt_path=os.path.join(ROOT, \"weights/classes.txt\"),\n confidence_threshold=self.confi_thr,\n iou_threshold=self.iou_thr)\n\n def _init_tracker(self):\n self.tracker = DeepSort(\n model_path=os.path.join(ROOT, f\"src/models/tracking/deep_sort/deep/checkpoint/ckpt.t7\"))\n \n def stop_process(self):\n self.threadFlag = False\n \n def toggle_play_pause(self):\n self.pause_process = not self.pause_process\n \n def run(self):\n self.send_thread_start_finish_flag.emit(\"processing_on_file\")\n media_fmt = self.check_image_or_video(self.video_path)\n cap = cv.VideoCapture(self.video_path)\n if not cap.isOpened():\n raise IOError(\"Couldn't open webcam or video\")\n video_info = self.get_video_info(cap)\n self.send_video_info.emit(video_info)\n\n model_output = []\n frame_id = 1\n while self.threadFlag:\n if self.pause_process:\n continue\n ret, frame = cap.read()\n if ret is False:\n break\n\n if frame_id % 
int(self.frame_interval+1) == 0:\n model_output = []\n if self.ai_task == \"object_detection\":\n model_output = self.detector.inference(frame, self.confi_thr, self.iou_thr)\n elif self.ai_task == \"pose_detection\":\n model_output = self.pose_detector.inference(frame, self.confi_thr, self.iou_thr)\n elif self.ai_task == \"segmentation\":\n model_output = self.seg_detector.inference(frame, self.confi_thr, self.iou_thr)\n \n if media_fmt == \"video\":\n model_output = self.tracker.update(\n detection_results=model_output,\n ori_img=frame)\n model_output = add_image_id(model_output, frame_id)\n frame = draw_results(frame, model_output)\n display_frame = self.convert_cv_qt(frame, self.ih, self.iw)\n\n self.send_display_frame.emit(display_frame)\n self.send_play_progress.emit(int(frame_id/video_info[\"length\"]*1000))\n self.send_ai_output.emit(model_output)\n frame_id += 1\n cap.release()\n if media_fmt == \"video\":\n blank_image = np.zeros((self.ih, self.iw, 3))\n blank_image = cv.cvtColor(blank_image.astype('uint8'), cv.COLOR_BGR2RGBA)\n show_image = QImage(blank_image.data, blank_image.shape[1], blank_image.shape[0], QImage.Format.Format_RGBA8888)\n self.send_display_frame.emit(show_image)\n self.send_ai_output.emit([])\n self.send_thread_start_finish_flag.emit(\"waiting_for_setting\")\n \n def get_video_info(self, video_cap):\n video_info = {}\n video_info[\"FPS\"] = video_cap.get(cv.CAP_PROP_FPS)\n video_info[\"length\"] = int(video_cap.get(cv.CAP_PROP_FRAME_COUNT))\n video_info[\"size\"] = (int(video_cap.get(cv.CAP_PROP_FRAME_WIDTH)),int(video_cap.get(cv.CAP_PROP_FRAME_HEIGHT)))\n return video_info\n\n def check_image_or_video(self, media_path):\n img_fm = (\".tif\", \".tiff\", \".jpg\", \".jpeg\", \".gif\", \".png\", \".eps\", \".raw\", \".cr2\", \".nef\", \".orf\", \".sr2\", \".bmp\", \".ppm\", \".heif\")\n vid_fm = (\".flv\", \".avi\", \".mp4\", \".3gp\", \".mov\", \".webm\", \".ogg\", \".qt\", \".avchd\")\n media_fms = {\"image\": img_fm, \"video\": vid_fm}\n if any(media_path.lower().endswith(media_fms[\"image\"]) for ext in media_fms[\"image\"]):\n return \"image\"\n elif any(media_path.lower().endswith(media_fms[\"video\"]) for ext in media_fms[\"video\"]):\n return \"video\"\n else:\n raise TypeError(\"Please select an image or video\")\n \n def convert_cv_qt(self, image, screen_height, screen_width):\n h, w, _ = image.shape\n scale = min(screen_width / w, screen_height / h)\n nw, nh = int(scale * w), int(scale * h)\n image_resized = cv.resize(image, (nw, nh))\n image_paded = np.full(shape=[screen_height, screen_width, 3], fill_value=0)\n dw, dh = (screen_width - nw) // 2, (screen_height - nh) // 2\n image_paded[dh:nh + dh, dw:nw + dw, :] = image_resized\n image_paded = cv.cvtColor(image_paded.astype('uint8'), cv.COLOR_BGR2RGBA)\n return QImage(image_paded.data, image_paded.shape[1], image_paded.shape[0], QImage.Format.Format_RGBA8888)" } ]
from src.qt.stream.video_capture import CameraCaptureThread
from src.qt.stream.visualize import VideoVisualizationThread
from src.qt.stream.ai_worker import AiWorkerThread
from src.ui.main_window import Ui_MainWindow
from src.qt.video.video_worker import FileProcessThread
from PyQt6 import QtGui, QtWidgets
from PyQt6.QtCore import Qt
import sys
import numpy as np
12,238
class MainWindow(QtWidgets.QMainWindow, Ui_MainWindow):
    def __init__(self, parent=None):
        super(MainWindow, self).__init__(parent)
        self.setupUi(self)
        self.ai_thread = AiWorkerThread()
        self.camera_thread = CameraCaptureThread()
self.display_thread = VideoVisualizationThread()
1
2023-10-18 09:21:01+00:00
16k
S-LoRA/S-LoRA
slora/server/router/manager.py
[ { "identifier": "SamplingParams", "path": "slora/server/sampling_params.py", "snippet": "class SamplingParams:\n\n def __init__(\n self,\n do_sample: bool = False,\n presence_penalty: float = 0.0,\n frequency_penalty: float = 0.0,\n temperature: float = 1.0,\n top_p: float = 1.0,\n top_k: int = -1, # -1 is for all \n ignore_eos: bool = False,\n max_new_tokens: int = 16,\n stop_sequences: Optional[Union[str, List[str]]] = None # 停止句子条件\n ) -> None:\n self.do_sample = do_sample\n self.presence_penalty = presence_penalty\n self.frequency_penalty = frequency_penalty\n self.temperature = temperature\n self.top_p = top_p\n self.top_k = top_k\n self.ignore_eos = ignore_eos\n self.max_new_tokens = max_new_tokens\n self.stop_sequences = stop_sequences\n if self.do_sample == False:\n self.temperature = 1.0\n self.top_p = 1.0\n self.top_k = 1\n if self.temperature >= 0.0 and self.temperature < _SAMPLING_EPS: # temperature is too slow, change to greedy search\n self.temperature = 1.0\n self.top_k = 1\n return\n \n def verify(self):\n if self.presence_penalty < 0.0:\n raise ValueError(f\"presence_penalty must >= 0.0, got {self.presence_penalty}\")\n if self.frequency_penalty < 0.0:\n raise ValueError(f\"frequency_penalty must >= 0.0, got {self.frequency_penalty}\")\n if self.temperature <= 0.0:\n raise ValueError(f\"temperature must > 0.0, got {self.temperature}\")\n if self.top_p <= 0.0 or self.top_p > 1.0:\n raise ValueError(f\"top_p must in (0.0, 1.0], got {self.top_p}\")\n if self.top_k < -1 or self.top_k == 0:\n raise ValueError(f\"top_k must be -1 (disable), or at least 1, got {self.top_k}.\")\n if self.max_new_tokens < 1:\n raise ValueError(f\"max_new_tokens must be at least 1 , got {self.max_new_tokens}.\")\n return\n\n def stop_sentences_to_token_ids(self, tokenizer):\n if self.stop_sequences is None:\n self.stop_sequences = []\n else:\n if isinstance(self.stop_sequences, str):\n self.stop_sequences = [self.stop_sequences]\n new_stop_sequences = []\n for stop_str in self.stop_sequences:\n stop_str_ids = tokenizer.encode(stop_str)\n if stop_str_ids is not None and len(stop_str_ids) >= 1: # remove bos_token_id\n stop_str_ids = stop_str_ids[1:]\n if len(stop_str_ids) > 0:\n new_stop_sequences.append(stop_str_ids)\n self.stop_sequences = new_stop_sequences\n return\n \n def to_dict(self):\n ret = {}\n ret[\"do_sample\"] = self.do_sample\n ret[\"presence_penalty\"] = self.presence_penalty\n ret[\"frequency_penalty\"] = self.frequency_penalty\n ret[\"temperature\"] = self.temperature\n ret[\"top_p\"] = self.top_p\n ret[\"top_k\"] = self.top_k\n # if self.ignore_eos is not None:\n # ret[\"ignore_eos\"] = self.ignore_eos\n # if self.max_tokens is not None:\n # ret[\"max_tokens\"] = self.max_tokens\n return ret" }, { "identifier": "Req", "path": "slora/server/io_struct.py", "snippet": "class Req:\n def __init__(self, adapter_dir, request_id, prompt_ids, sample_params: SamplingParams):\n self.adapter_dir = adapter_dir\n self.request_id = request_id\n self.prompt_ids = prompt_ids\n self.input_len = len(prompt_ids)\n self.max_output_len = sample_params.max_new_tokens\n self.sample_params = sample_params\n self.output_ids = []\n self.output_metadata_list = []\n self.has_generate_finished = False\n self.aborted = False\n\n def to_rpc_obj(self):\n return {\"adapter_dir\": self.adapter_dir,\n \"request_id\": self.request_id,\n \"input_id\": self.prompt_ids,\n \"output_len\": self.max_output_len,\n \"sampling_param\": self.sample_params.to_dict() }\n\n def to_req_detokenization_state(self):\n out = 
ReqDetokenizationState(self.request_id, self.prompt_ids, self.max_output_len, self.sample_params.ignore_eos)\n if self.output_metadata_list:\n out.gen_metadata.update(self.output_metadata_list[-1])\n return out\n \n def stop_sequences_matched(self):\n for stop_token_ids in self.sample_params.stop_sequences:\n stop_len = len(stop_token_ids)\n if stop_len > 0:\n if len(self.output_ids) >= stop_len:\n if all(self.output_ids[-(stop_len - i)] == stop_token_ids[i] for i in range(stop_len)):\n return True\n return False\n\n def __repr__(self):\n return (f\"request_id(n={self.request_id}, \"\n f\"adapter_dir={self.adapter_dir}, \")\n # f\"prompt_ids={self.prompt_ids}, \")" }, { "identifier": "Batch", "path": "slora/server/io_struct.py", "snippet": "class Batch:\n def __init__(self, batch_id, reqs: List[Req]):\n self.batch_id = batch_id\n self.reqs = reqs\n self.id_to_reqs = {req.request_id: req for req in reqs}\n\n self.adapter_dirs = set()\n for req in reqs:\n self.adapter_dirs.add(req.adapter_dir)\n\n def input_tokens(self):\n batch_input_tokens = 0\n for req in self.reqs:\n batch_input_tokens += req.input_len\n return batch_input_tokens\n\n def calcu_max_tokens(self):\n tokens = 0\n for req in self.reqs:\n tokens += req.input_len + req.max_output_len\n return tokens\n \n def calcu_used_tokens(self):\n tokens = 0\n for req in self.reqs:\n tokens += req.input_len + len(req.output_ids)\n return tokens\n\n def mark_finished_req(self, eos_id):\n has_new_finish = False\n for req in self.reqs:\n if req.stop_sequences_matched():\n req.has_generate_finished = True\n has_new_finish = True\n if req.output_ids[-1] == eos_id and req.sample_params.ignore_eos == False:\n req.has_generate_finished = True\n has_new_finish = True\n if len(req.output_ids) >= req.max_output_len or req.aborted:\n req.has_generate_finished = True\n has_new_finish = True\n return has_new_finish\n\n def filter_finished(self):\n unfinished_req = []\n for req in self.reqs:\n if not req.has_generate_finished:\n unfinished_req.append(req)\n self.reqs = unfinished_req\n self.id_to_reqs = {req.request_id: req for req in self.reqs}\n\n self.adapter_dirs = set()\n for req in self.reqs:\n self.adapter_dirs.add(req.adapter_dir)\n\n def is_clear(self):\n return len(self.reqs) == 0\n\n def merge(self, mini_batch):\n for _req in mini_batch.reqs:\n self.reqs.append(_req)\n self.adapter_dirs.add(_req.adapter_dir)\n self.id_to_reqs = {req.request_id: req for req in self.reqs}\n return\n\n def __repr__(self):\n return (f\"batch_id={self.batch_id}, \"\n # f\"reqs={self.reqs}, \"\n f\"req_ids={self.id_to_reqs.keys()}\")" }, { "identifier": "BatchAbortReq", "path": "slora/server/io_struct.py", "snippet": "class BatchAbortReq:\n def __init__(self, req_ids):\n self.reqs: List[str] = req_ids" }, { "identifier": "start_model_process", "path": "slora/server/router/model_infer/model_rpc.py", "snippet": "async def start_model_process(port, world_size):\n # 单卡时不使用 rpc\n if world_size == 1:\n return ModelRpcClient(ModelRpcServer(), world_size)\n \n import multiprocessing\n proc = multiprocessing.Process(target=_init_env, args=(port,))\n proc.start()\n await asyncio.sleep(2)\n repeat_count = 0\n while repeat_count < 20:\n try:\n con = rpyc.connect(\"localhost\", port, config={\"allow_pickle\": True})\n break\n except BaseException:\n await asyncio.sleep(1)\n repeat_count += 1\n if repeat_count == 20:\n raise Exception(\"init rpc env error!\")\n\n assert proc.is_alive()\n return ModelRpcClient(con.root, world_size, rpc_server_process=proc)" }, { "identifier": 
"ModelRpcClient", "path": "slora/server/router/model_infer/model_rpc.py", "snippet": "class ModelRpcClient:\n def __init__(self, model_rpc, world_size, rpc_server_process=None):\n self.model: ModelRpcServer = model_rpc\n self.world_size = world_size\n self.rpc_server_process = rpc_server_process\n self.use_rpc = self.world_size != 1\n if self.use_rpc:\n def async_wrap(f):\n f = rpyc.async_(f)\n async def _func(*args, **kwargs):\n ans = f(*args, **kwargs)\n await asyncio.to_thread(ans.wait)\n # raise if exception\n return ans.value\n return _func\n self._init_model = async_wrap(self.model.init_model)\n self._load_adapters = rpyc.async_(self.model.load_adapters)\n self._offload_adapters = rpyc.async_(self.model.offload_adapters)\n self._unmerge_adapter = rpyc.async_(self.model.unmerge_adapter)\n self._merge_adapter = rpyc.async_(self.model.merge_adapter)\n self._add_batch = async_wrap(self.model.add_batch)\n self._prefill_batch = async_wrap(self.model.prefill_batch)\n self._decode_batch = async_wrap(self.model.decode_batch)\n self._filter_batch = async_wrap(self.model.filter_batch)\n self._merge_batch = async_wrap(self.model.merge_batch)\n self._remove_batch = async_wrap(self.model.remove_batch)\n self._profile_prefill = async_wrap(self.model.profile_prefill)\n else:\n self._init_model = self.model.exposed_init_model\n self._load_adapters = self.model.exposed_load_adapters\n self._offload_adapters = self.model.exposed_offload_adapters\n self._merge_adapter = self.model.exposed_merge_adapter\n self._unmerge_adapter = self.model.exposed_unmerge_adapter\n self._add_batch = self.model.exposed_add_batch\n self._prefill_batch = self.model.exposed_prefill_batch\n self._decode_batch = self.model.exposed_decode_batch\n self._filter_batch = self.model.exposed_filter_batch\n self._merge_batch = self.model.exposed_merge_batch\n self._remove_batch = self.model.exposed_remove_batch\n self._profile_prefill = self.model.exposed_profile_prefill\n return\n\n async def init_model(self, rank_id, world_size, weight_dir, adapter_dirs,\n max_total_token_num, load_way, mode, input_params,\n\t\t\t prefetch_stream):\n ans : rpyc.AsyncResult = self._init_model(rank_id, world_size, weight_dir, adapter_dirs,\n max_total_token_num, load_way, mode, input_params,\n\t\t\t\t\t\t prefetch_stream)\n if self.use_rpc:\n await ans\n return\n else:\n return\n\n\n async def load_adapters(self, reqs, prefetch=False):\n self._load_adapters(reqs, prefetch=prefetch)\n\n\n async def offload_adapters(self, reserved_reqs=None, prefetch=False):\n self._offload_adapters(reserved_reqs, prefetch=prefetch)\n \n async def unmerge_adapter(self):\n self._unmerge_adapter()\n \n async def merge_adapter(self):\n self._merge_adapter()\n\n\n async def init_batch(self, batch_id, reqs):\n ans = self._add_batch(batch_id, reqs, \"fp16\")\n if self.use_rpc:\n await ans\n return\n else:\n return\n\n async def prefill_batch(self, batch_id):\n ans = self._prefill_batch(batch_id)\n if self.use_rpc:\n return await ans\n else:\n return ans\n\n async def decode_batch(self, batch_id):\n ans = self._decode_batch(batch_id)\n if self.use_rpc:\n return await ans\n else:\n return ans\n\n async def filter_batch(self, batch_id, req_id_list):\n ans = self._filter_batch(batch_id, req_id_list)\n if self.use_rpc:\n await ans\n return\n else:\n return \n\n async def merge_batch(self, batch_id1, batch_id2):\n ans = self._merge_batch(batch_id1, batch_id2)\n if self.use_rpc:\n await ans\n return\n else:\n return\n\n async def remove_batch(self, batch_id):\n ans = 
self._remove_batch(batch_id)\n if self.use_rpc:\n await ans\n return\n else:\n return\n \n async def profile_prefill(self):\n ans = self._profile_prefill()\n if self.use_rpc:\n return await ans\n else:\n return ans" }, { "identifier": "ReqQueue", "path": "slora/server/router/req_queue.py", "snippet": "class ReqQueue:\n\n def __init__(self, max_total_tokens, batch_max_tokens, running_max_req_size) -> None:\n self.max_total_tokens = max_total_tokens\n assert batch_max_tokens is not None\n self.batch_max_tokens = batch_max_tokens\n self.running_max_req_size = running_max_req_size\n self.waiting_req_list: List[Req] = []\n \n def append(self, req):\n self.waiting_req_list.append(req)\n return\n \n def _init_cache_list(self, current_batch:Batch, lora_ranks):\n if current_batch is not None:\n self.cache_len_list = []\n self.adapters = set()\n self.adapter_size = 0\n for req in current_batch.reqs:\n self.cache_len_list.append((req.input_len + len(req.output_ids),\n req.max_output_len - len(req.output_ids) - 1))\n if req.adapter_dir not in self.adapters:\n self.adapter_size += lora_ranks[req.adapter_dir] * 4\n self.adapters.add(req.adapter_dir)\n else:\n self.cache_len_list = []\n self.adapters = set()\n self.adapter_size = 0\n \n # @calculate_time(show=True, min_cost_ms=0.1)\n def _can_add_new_req(self, req, lora_ranks):\n self.cache_len_list.append((req.input_len + 1, req.max_output_len - 1)) # hard to analysis\n self.cache_len_list.sort(key=lambda x: -x[1])\n if req.adapter_dir not in self.adapters:\n self.adapter_size += lora_ranks[req.adapter_dir] * 4\n self.adapters.add(req.adapter_dir)\n \n left_out_len_array = np.array([e[1] for e in self.cache_len_list])\n # assert left_out_len_array.min() >= 0\n has_run_len_array = np.array([e[0] for e in self.cache_len_list])\n cum_run_len_array = np.cumsum(has_run_len_array)\n size_array = np.arange(1, len(self.cache_len_list) + 1, 1)\n \n need_max_token_num = (left_out_len_array * size_array + cum_run_len_array).max()\n if (need_max_token_num < self.max_total_tokens - self.adapter_size and\n len(self.cache_len_list) <= self.running_max_req_size):\n return True\n else:\n return False\n \n def update_counter(self, req):\n pass \n\n def generate_new_batch(self, current_batch:Batch, lora_ranks: dict[str, int]):\n if current_batch is not None and len(current_batch.reqs) >= self.running_max_req_size:\n return None\n \n self._init_cache_list(current_batch, lora_ranks)\n can_run_list = []\n new_batch_total_tokens = 0\n aborted_count = 0\n for req in self.waiting_req_list:\n if req.aborted:\n aborted_count += 1\n continue\n if (self._can_add_new_req(req, lora_ranks) and\n new_batch_total_tokens + req.input_len <= self.batch_max_tokens):\n can_run_list.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n\n if len(can_run_list) != 0:\n new_batch = Batch(uuid.uuid4().hex, can_run_list)\n self.waiting_req_list = self.waiting_req_list[len(can_run_list) + aborted_count:]\n return new_batch\n else:\n return None\n\n\n def next_batch(self):\n next_batch = []\n new_batch_total_tokens = 0\n for req in self.waiting_req_list:\n if req.aborted:\n continue\n if new_batch_total_tokens + req.input_len <= self.batch_max_tokens:\n next_batch.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n if len(next_batch) > 0:\n next_batch = Batch(uuid.uuid4().hex, next_batch)\n return next_batch\n else:\n return None" }, { "identifier": "calculate_time", "path": "slora/utils/infer_utils.py", "snippet": "def calculate_time(show=False, 
min_cost_ms=0.0):\n def wrapper(func):\n def inner_func(*args, **kwargs):\n torch.cuda.synchronize()\n if show:\n start_time = time.time()\n result = func(*args, **kwargs)\n torch.cuda.synchronize()\n if show:\n cost_time = (time.time() - start_time) * 1000\n if cost_time > min_cost_ms:\n print(f\"Function {func.__name__} took {cost_time} ms to run.\")\n return result\n\n return inner_func\n\n return wrapper" }, { "identifier": "BatchTokenIdOut", "path": "slora/server/io_struct.py", "snippet": "class BatchTokenIdOut:\n def __init__(self):\n self.reqs_infs: List[Tuple[str, int, Dict, bool, bool]] = [] # [req_id, new_token_id, gen_metadata, finished_state, abort_state]" }, { "identifier": "AbortReq", "path": "slora/server/io_struct.py", "snippet": "class AbortReq:\n def __init__(self, req_id):\n self.req_id = req_id" }, { "identifier": "Stats", "path": "slora/server/router/stats.py", "snippet": "class Stats:\n\n def __init__(self, log_status, log_stats_interval) -> None:\n self.log_stats = log_status\n self.log_stats_interval = log_stats_interval\n self.last_log_time = time.time()\n self.all_tokens = 0\n self.output_tokens = 0\n self.prompt_tokens = 0\n return\n \n def count_prompt_tokens(self, run_batch):\n if self.log_stats:\n tokens = run_batch.input_tokens()\n self.prompt_tokens += tokens\n self.all_tokens += tokens\n return\n \n def count_output_tokens(self, run_batch):\n if self.log_stats:\n tokens = len(run_batch.reqs)\n self.output_tokens += tokens\n self.all_tokens += tokens\n return\n\n def print_stats(self):\n if not self.log_stats:\n return\n\n now = time.time()\n if now - self.last_log_time > self.log_stats_interval:\n print(f\"Avg tokens(prompt+generate) throughput: {self.all_tokens/(now-self.last_log_time):8.3f} tokens/s\\n\"\n f\"Avg prompt tokens throughput: {self.prompt_tokens/(now-self.last_log_time):8.3f} tokens/s\\n\"\n f\"Avg generate tokens throughput: {self.output_tokens/(now-self.last_log_time):8.3f} tokens/s\")\n self.all_tokens = 0\n self.output_tokens = 0\n self.prompt_tokens = 0\n self.last_log_time = now\n return" }, { "identifier": "InputParams", "path": "slora/server/input_params.py", "snippet": "class InputParams:\n\n def __init__(\n self,\n max_req_total_len,\n # kv cache manager parameters\n max_total_token_num,\n pool_size_lora,\n batch_max_tokens,\n running_max_req_size,\n # mem_ratio,\n # adapter_ratio,\n # heuristic\n swap,\n prefetch,\n prefetch_size,\n scheduler,\n profile,\n batch_num_adapters,\n enable_abort,\n # kernel,\n # # debug\n dummy,\n no_lora_compute,\n no_lora_swap,\n # no_lora_copy,\n no_kernel,\n no_mem_pool,\n bmm,\n no_lora,\n # fairness\n fair_weights,\n ) -> None:\n self.max_req_total_len = max_req_total_len\n self.max_total_token_num = max_total_token_num\n self.pool_size_lora = pool_size_lora\n self.batch_max_tokens = batch_max_tokens\n self.running_max_req_size = running_max_req_size\n # self.mem_ratio = mem_ratio\n # self.adapter_ratio = adapter_ratio\n\n self.swap = swap\n self.prefetch = prefetch\n self.prefetch_size = prefetch_size\n self.scheduler = scheduler\n self.profile = profile\n self.batch_num_adapters = batch_num_adapters\n self.enable_abort = enable_abort\n # self.kernel = kernel\n\n self.dummy = dummy\n self.no_lora_compute = no_lora_compute\n self.no_lora_swap = no_lora_swap\n # self.no_lora_copy = no_lora_copy\n self.no_kernel = no_kernel\n self.no_mem_pool = no_mem_pool\n self.bmm = bmm\n self.no_lora = no_lora\n \n self.fair_weights = fair_weights\n return" }, { "identifier": "get_lora_config", "path": 
"slora/models/peft/lora_adapter.py", "snippet": "def get_lora_config(lora_dir, dummy):\n if dummy:\n return get_lora_config_json(lora_dir), lora_dir\n else:\n lora_dir = re.sub(r'-(\\d+)$', '', lora_dir)\n return hf_load_config(lora_dir)" }, { "identifier": "AlphaModel", "path": "slora/server/router/profiler.py", "snippet": "class AlphaModel:\n def __init__(self, profiling_results) -> None:\n self.base_prefill = profiling_results[0]\n print(self.base_prefill)\n \n # load from .pkl file\n @classmethod\n def from_file(cls, file_path):\n with open(file_path, \"rb\") as f:\n results = pickle.load(f)\n return cls(results)\n\n def get_latency(self, batch_size, seq_len):\n seq_len = math.ceil(seq_len / 32) * 32\n assert seq_len <= 1024\n if batch_size == 0: return 0\n # assert batch_size in self.base_prefill\n if batch_size in self.base_prefill:\n return self.base_prefill[batch_size][seq_len]\n elif batch_size == 1 and 2 in self.base_prefill:\n return self.base_prefill[2][seq_len]\n elif batch_size % 2 != 0 and batch_size - 1 in self.base_prefill and batch_size + 1 in self.base_prefill:\n return (self.base_prefill[batch_size - 1][seq_len] + self.base_prefill[batch_size + 1][seq_len]) / 2\n else:\n return np.Inf" }, { "identifier": "BetaModel", "path": "slora/server/router/profiler.py", "snippet": "class BetaModel:\n def __init__(self, profiling_results) -> None:\n self.base_prefill = profiling_results[0]\n self.adapter_prefill = profiling_results[1]\n print(self.adapter_prefill)\n \n # load from .pkl file\n @classmethod\n def from_file(cls, file_path):\n with open(file_path, \"rb\") as f:\n results = pickle.load(f)\n return cls(results)\n \n def get_latency(self, rank_size, batch_size, seq_len):\n if rank_size == 0: return 0\n seq_len = math.ceil(seq_len / 32) * 32\n assert seq_len <= 1024\n if batch_size == 0: return 0\n # assert batch_size in self.base_prefill\n if batch_size in self.base_prefill:\n return self.adapter_prefill[rank_size][batch_size][seq_len] - self.base_prefill[batch_size][seq_len]\n elif batch_size == 1 and 2 in self.base_prefill:\n return self.adapter_prefill[rank_size][2][seq_len] - self.base_prefill[2][seq_len]\n elif batch_size % 2 != 0 and batch_size - 1 in self.base_prefill and batch_size + 1 in self.base_prefill:\n a = self.adapter_prefill[rank_size][batch_size - 1][seq_len] - self.base_prefill[batch_size - 1][seq_len]\n b = self.adapter_prefill[rank_size][batch_size + 1][seq_len] - self.base_prefill[batch_size + 1][seq_len]\n return (a + b) / 2\n else:\n return np.Inf" }, { "identifier": "AbortReqQueue", "path": "slora/server/router/abort_req_queue.py", "snippet": "class AbortReqQueue(ReqQueue):\n\n def __init__(self, max_total_tokens, batch_max_tokens, running_max_req_size) -> None:\n super().__init__(max_total_tokens, batch_max_tokens, running_max_req_size)\n self.abort_req_list: List[str] = []\n self.req_time_stamp = []\n self.init_bs = 1\n self.apprx_req_rate = 1\n self.apprx_bs = self.init_bs\n self.last_req_num = 0\n self.last_time = time.time()\n \n def append(self, req):\n self.waiting_req_list.insert(0, req)\n self.req_time_stamp.insert(0, time.time())\n assert len(self.waiting_req_list) == len(self.req_time_stamp)\n return\n\n def reset_abort_list(self):\n self.abort_req_list = []\n\n def generate_new_batch(self, current_batch:Batch, lora_ranks: dict[str, int]):\n if current_batch is not None and len(current_batch.reqs) >= self.running_max_req_size:\n return None\n \n self._init_cache_list(current_batch, lora_ranks)\n can_run_list = []\n abort_list = []\n 
new_batch_total_tokens = 0\n aborted_count = 0\n\n self.apprx_req_rate = int(0.7 * (len(self.waiting_req_list) - self.last_req_num) + 0.3 * self.apprx_req_rate)\n for i, req in enumerate(self.waiting_req_list):\n if attainment_func(time.time() - self.req_time_stamp[i] + 0.5) == 0:\n req.aborted = True\n aborted_count += 1\n abort_list.append(req)\n self.abort_req_list.append(req.request_id)\n self.req_time_stamp = [self.req_time_stamp[i] for i in range(len(self.req_time_stamp)) if self.waiting_req_list[i] not in abort_list]\n self.waiting_req_list = [req for req in self.waiting_req_list if req not in abort_list]\n \n if self.apprx_req_rate >= self.apprx_bs:\n print(\"apprx bs\", self.apprx_bs, \"req rate\", self.apprx_req_rate)\n # choose from the latest requests\n for req in self.waiting_req_list:\n if req.aborted:\n aborted_count += 1\n abort_list.append(req)\n continue\n if (self._can_add_new_req(req, lora_ranks) and\n new_batch_total_tokens + req.input_len <= self.batch_max_tokens):\n can_run_list.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n elif self.apprx_req_rate < self.apprx_bs:\n # choose from the earliest requests\n for req in reversed(self.waiting_req_list):\n if req.aborted:\n aborted_count += 1\n abort_list.append(req)\n continue\n if (self._can_add_new_req(req, lora_ranks) and\n new_batch_total_tokens + req.input_len <= self.batch_max_tokens):\n can_run_list.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n \n if len(can_run_list) != 0:\n new_batch = Batch(uuid.uuid4().hex, can_run_list)\n self.req_time_stamp = [self.req_time_stamp[i] for i in range(len(self.req_time_stamp)) if self.waiting_req_list[i] not in can_run_list and self.waiting_req_list[i] not in abort_list]\n self.waiting_req_list = [req for req in self.waiting_req_list if req not in can_run_list and req not in abort_list]\n self.last_req_num = len(self.waiting_req_list)\n self.apprx_bs = max(int(0.7 * len(new_batch.reqs) + 0.3 * self.apprx_bs), self.init_bs)\n return new_batch\n else:\n return None" }, { "identifier": "ClusterReqQueue", "path": "slora/server/router/cluster_req_queue.py", "snippet": "class ClusterReqQueue(ReqQueue):\n\n def __init__(self, max_total_tokens, batch_max_tokens, running_max_req_size, batch_num_adapters) -> None:\n super().__init__(max_total_tokens, batch_max_tokens, running_max_req_size)\n self.batch_num_adapters = batch_num_adapters\n\n def _generate_new_batch_prioritizing_existing_adapters(self, current_batch:Batch, lora_ranks: dict[str, int]):\n filtered_waiting_req_list = list(filter(lambda req: req.adapter_dir in current_batch.adapter_dirs, self.waiting_req_list))\n request_ids_to_remove_from_waiting_queue = set()\n can_run_list = []\n new_batch_total_tokens = 0\n for idx, req in enumerate(filtered_waiting_req_list):\n if req.aborted:\n request_ids_to_remove_from_waiting_queue.add(req.request_id)\n continue\n if (self._can_add_new_req(req, lora_ranks) and\n new_batch_total_tokens + req.input_len <= self.batch_max_tokens):\n can_run_list.append(req)\n new_batch_total_tokens += req.input_len\n request_ids_to_remove_from_waiting_queue.add(req.request_id)\n else:\n break\n \n self.waiting_req_list = list(filter(lambda req: req.request_id not in request_ids_to_remove_from_waiting_queue, self.waiting_req_list))\n request_ids_to_remove_from_waiting_queue = set()\n\n # If filtered waiting list was not enough to max-out the current running batch, we resolve to FIFO\n for req in self.waiting_req_list:\n if req.aborted:\n 
request_ids_to_remove_from_waiting_queue.add(req.request_id)\n continue\n\n if (self._can_add_new_req(req, lora_ranks) and\n new_batch_total_tokens + req.input_len <= self.batch_max_tokens):\n can_run_list.append(req)\n new_batch_total_tokens += req.input_len\n request_ids_to_remove_from_waiting_queue.add(req.request_id)\n else:\n break\n \n self.waiting_req_list = list(filter(lambda req: req.request_id not in request_ids_to_remove_from_waiting_queue, self.waiting_req_list))\n\n return can_run_list\n\n def generate_new_batch(self, current_batch:Batch, lora_ranks: dict[str, int]):\n if current_batch is not None and len(current_batch.reqs) >= self.running_max_req_size:\n return None\n \n self._init_cache_list(current_batch, lora_ranks)\n can_run_list = []\n new_batch_total_tokens = 0\n aborted_count = 0\n\n for req in self.waiting_req_list:\n if req.aborted:\n aborted_count += 1\n continue\n\n if current_batch is not None and len(current_batch.adapter_dirs) >= self.batch_num_adapters:\n self.waiting_req_list = self.waiting_req_list[len(can_run_list) + aborted_count:]\n rest_of_batch = self._generate_new_batch_prioritizing_existing_adapters(current_batch, lora_ranks)\n can_run_list += rest_of_batch\n if len(can_run_list) != 0:\n return Batch(uuid.uuid4().hex, can_run_list)\n else:\n return None\n\n if (self._can_add_new_req(req, lora_ranks) and\n new_batch_total_tokens + req.input_len <= self.batch_max_tokens):\n can_run_list.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n\n if len(can_run_list) != 0:\n new_batch = Batch(uuid.uuid4().hex, can_run_list)\n self.waiting_req_list = self.waiting_req_list[len(can_run_list) + aborted_count:]\n return new_batch\n else:\n return None" }, { "identifier": "VTCReqQueue", "path": "slora/server/router/vtc_req_queue.py", "snippet": "class VTCReqQueue(ReqQueue):\n\n def __init__(self, max_total_tokens, batch_max_tokens, running_max_req_size,\n adapter_dirs, fair_weights,\n input_price=1, output_price=2) -> None:\n super().__init__(max_total_tokens, batch_max_tokens, running_max_req_size)\n self.input_price = input_price\n self.output_price = output_price\n self.served = {}\n self.user_req_list = {}\n\n self.adapter_dirs = adapter_dirs\n self.fair_weights = fair_weights\n\n self.fairw = {}\n for i in range(len(adapter_dirs)):\n if i < len(fair_weights):\n self.fairw[adapter_dirs[i]] = fair_weights[i]\n else:\n self.fairw[adapter_dirs[i]] = 1\n \n \n def append(self, req):\n self.waiting_req_list.append(req)\n if req.adapter_dir not in self.user_req_list:\n self.user_req_list[req.adapter_dir] = deque([req])\n self.served[req.adapter_dir] = 0\n else:\n self.user_req_list[req.adapter_dir].append(req)\n\n # waiting queue was empty before\n if len(self.user_req_list[req.adapter_dir]) == 1:\n # lift counter\n cnts = [v for k, v in self.served.items()\n if (len(self.user_req_list[k]) > 0 and k != req.adapter_dir)]\n if len(cnts) > 0:\n self.served[req.adapter_dir] = max(self.served[req.adapter_dir], min(cnts))\n\n \n def _init_cache_list(self, current_batch:Batch, lora_ranks):\n if current_batch is not None:\n self.cache_len_list = []\n self.adapters = set()\n self.adapter_size = 0\n for req in current_batch.reqs:\n self.cache_len_list.append((req.input_len + len(req.output_ids),\n req.max_output_len - len(req.output_ids) - 1))\n if req.adapter_dir not in self.adapters:\n self.adapter_size += lora_ranks[req.adapter_dir] * 4\n self.adapters.add(req.adapter_dir)\n else:\n self.cache_len_list = []\n self.adapters = set()\n self.adapter_size = 
0\n\n \n # @calculate_time(show=True, min_cost_ms=0.1)\n def _can_add_new_req(self, req, lora_ranks):\n self.cache_len_list.append((req.input_len + 1, req.max_output_len - 1)) # hard to analysis\n self.cache_len_list.sort(key=lambda x: -x[1])\n if req.adapter_dir not in self.adapters:\n self.adapter_size += lora_ranks[req.adapter_dir] * 4\n self.adapters.add(req.adapter_dir)\n \n left_out_len_array = np.array([e[1] for e in self.cache_len_list])\n # assert left_out_len_array.min() >= 0\n has_run_len_array = np.array([e[0] for e in self.cache_len_list])\n cum_run_len_array = np.cumsum(has_run_len_array)\n size_array = np.arange(1, len(self.cache_len_list) + 1, 1)\n \n need_max_token_num = (left_out_len_array * size_array + cum_run_len_array).max()\n if (need_max_token_num < self.max_total_tokens - self.adapter_size and\n len(self.cache_len_list) <= self.running_max_req_size):\n return True\n else:\n return False\n\n\n def generate_new_batch(self, current_batch:Batch, lora_ranks: dict[str, int]):\n if current_batch is not None and len(current_batch.reqs) >= self.running_max_req_size:\n return None\n if len(self.served) == 0:\n return None\n \n self._init_cache_list(current_batch, lora_ranks)\n can_run_list = []\n abort_list = []\n new_batch_total_tokens = 0\n aborted_count = 0\n active_served = {k: v for k, v in self.served.items()}\n while True:\n if len(active_served) == 0:\n break\n adapter_dir = min(active_served, key=active_served.get)\n if len(self.user_req_list[adapter_dir]) > 0:\n req = self.user_req_list[adapter_dir][0]\n if req.aborted:\n aborted_count += 1\n abort_list.append(req)\n self.user_req_list[adapter_dir].popleft()\n continue\n if (self._can_add_new_req(req, lora_ranks) and\n new_batch_total_tokens + req.input_len <= self.batch_max_tokens):\n can_run_list.append(req)\n new_batch_total_tokens += req.input_len\n self.user_req_list[adapter_dir].popleft()\n # update fairness counter\n self.served[adapter_dir] += req.input_len * self.input_price / self.fairw[adapter_dir]\n active_served[adapter_dir] += req.input_len * self.input_price / self.fairw[adapter_dir]\n else:\n break\n else:\n del active_served[adapter_dir]\n\n if len(can_run_list) != 0:\n new_batch = Batch(uuid.uuid4().hex, can_run_list)\n self.waiting_req_list = [req for req in self.waiting_req_list\n if req not in can_run_list and req not in abort_list]\n return new_batch\n else:\n return None\n\n \n def update_counter(self, current_batch: Batch):\n for req in current_batch.reqs:\n self.served[req.adapter_dir] += 1 * self.output_price / self.fairw[req.adapter_dir]\n\n\n def next_batch(self):\n raise NotImplementedError()" }, { "identifier": "PETSReqQueue", "path": "slora/server/router/pets_req_queue.py", "snippet": "class PETSReqQueue(ReqQueue):\n\n def __init__(self, max_total_tokens, batch_max_tokens, running_max_req_size) -> None:\n super().__init__(max_total_tokens, batch_max_tokens, running_max_req_size)\n self.alpha = None\n self.beta = None # will be set automatically in the profiling function in router.manager\n \n \n def append(self, req):\n self.waiting_req_list.append(req)\n return\n \n def _init_cache_list(self, current_batch:Batch, lora_ranks):\n if current_batch is not None:\n self.cache_len_list = []\n self.adapters = set()\n self.adapter_size = 0\n for req in current_batch.reqs:\n self.cache_len_list.append((req.input_len + len(req.output_ids),\n req.max_output_len - len(req.output_ids) - 1))\n if req.adapter_dir not in self.adapters:\n self.adapter_size += lora_ranks[req.adapter_dir] * 4\n 
self.adapters.add(req.adapter_dir)\n else:\n self.cache_len_list = []\n self.adapters = set()\n self.adapter_size = 0\n \n def intra_task_batching(self, lora_ranks):\n ## Preprocessing: gather the queries with the same adapter.\n clustered_queries_by_adapter = {}\n for query in self.waiting_req_list:\n adapter_dir = query.adapter_dir\n if adapter_dir in clustered_queries_by_adapter:\n clustered_queries_by_adapter[adapter_dir].append(query)\n else:\n clustered_queries_by_adapter[adapter_dir] = [query]\n\n ## DP\n mini_batches = []\n for adapter_dir, queries in clustered_queries_by_adapter.items():\n state_1st_stage = []\n split_idx_list = []\n\n ### Sort queries according to the sequence length in ascending order.\n queries = sorted(queries, key=lambda x: x.input_len)\n queries.insert(0, None) # Sentinel.\n\n ### Initialize.\n state_1st_stage.append(0)\n split_idx_list.append(0)\n for j in range(1, len(queries)):\n min_cost = np.Inf # INF\n split_idx = 0\n for k in range(1, j+1):\n lora_rank = lora_ranks[adapter_dir]\n tmp = state_1st_stage[k-1] + self.beta.get_latency(lora_rank, j-k+1, queries[j].input_len)\n if tmp < min_cost:\n min_cost = tmp\n split_idx = k-1\n split_idx_list.append(split_idx)\n state_1st_stage.append(min_cost)\n \n ### Split queries into mini-batches according to split_idx_list.\n \n end_idx = len(queries) - 1\n\n while(end_idx > 0):\n start_idx = split_idx_list[end_idx] + 1\n mini_batch = []\n max_len = queries[end_idx].input_len\n for j in range(start_idx, end_idx + 1):\n mini_batch.append(queries[j]) \n mini_batches.append((mini_batch, max_len))\n end_idx = split_idx_list[end_idx] \n \n return mini_batches\n \n # Inter-task batching.\n def inter_task_batching(self, mini_batches):\n ## Sort mini_batches according to the max sequence length.\n mini_batches = sorted(mini_batches, key=lambda x: x[1])\n mini_batches.insert(0, None) # Sentinel.\n\n tmp = 0\n mini_batch_sum = [0]\n for mini_batch in mini_batches[1:]:\n tmp += len(mini_batch[0])\n mini_batch_sum.append(tmp)\n\n ## DP.\n state_2nd_stage = []\n split_idx_list = []\n state_2nd_stage.append(0)\n split_idx_list.append(0)\n\n for i in range(1, len(mini_batches)):\n min_cost = np.Inf # INF\n split_idx = 0\n for j in range(1, i+1):\n total_samples = mini_batch_sum[i] - mini_batch_sum[j-1]\n tmp = state_2nd_stage[j-1] + self.alpha.get_latency(total_samples, mini_batches[i][1])\n if tmp < min_cost:\n min_cost = tmp\n split_idx = j - 1\n split_idx_list.append(split_idx)\n state_2nd_stage.append(min_cost)\n\n ## Split mini_batches into final scheduled_batches.\n ### Split mini_batches into macro_batches.\n\n end_idx = len(mini_batches) - 1\n macro_batches = []\n while(end_idx > 0):\n start_idx = split_idx_list[end_idx] + 1\n macro_batch = []\n max_len = mini_batches[end_idx][1]\n for j in range(start_idx, end_idx + 1):\n macro_batch.append(mini_batches[j]) \n macro_batches.append((macro_batch, max_len))\n end_idx = split_idx_list[end_idx] \n\n total_samples = 0\n for macro_batch in macro_batches:\n for mini_batch in macro_batch[0]:\n total_samples += len(mini_batch[0])\n # print(total_samples)\n\n return macro_batches\n \n # @calculate_time(show=True, min_cost_ms=0.1)\n def _can_add_new_req(self, req, lora_ranks):\n self.cache_len_list.append((req.input_len + 1, req.max_output_len - 1)) # hard to analysis\n self.cache_len_list.sort(key=lambda x: -x[1])\n if req.adapter_dir not in self.adapters:\n self.adapter_size += lora_ranks[req.adapter_dir] * 4\n self.adapters.add(req.adapter_dir)\n \n left_out_len_array = 
np.array([e[1] for e in self.cache_len_list])\n # assert left_out_len_array.min() >= 0\n has_run_len_array = np.array([e[0] for e in self.cache_len_list])\n cum_run_len_array = np.cumsum(has_run_len_array)\n size_array = np.arange(1, len(self.cache_len_list) + 1, 1)\n \n need_max_token_num = (left_out_len_array * size_array + cum_run_len_array).max()\n if (need_max_token_num < self.max_total_tokens - self.adapter_size and\n len(self.cache_len_list) <= self.running_max_req_size):\n return True\n else:\n return False\n\n def generate_new_batch(self, current_batch:Batch, lora_ranks: dict[str, int]):\n if current_batch is not None and len(current_batch.reqs) >= self.running_max_req_size:\n return None\n \n reqs = self.waiting_req_list\n # when waiting_reqs > 20\n if len(self.waiting_req_list) > 10:\n mini_batches = self.intra_task_batching(lora_ranks)\n macro_batches = self.inter_task_batching(mini_batches)\n \n macro_batch = macro_batches[-1][0]\n reqs = [req for minibatch in macro_batch for req in minibatch[0]]\n \n \n self._init_cache_list(current_batch, lora_ranks)\n can_run_list = []\n abort_list = []\n new_batch_total_tokens = 0\n aborted_count = 0\n for req in reqs:\n if req.aborted:\n aborted_count += 1\n abort_list.append(req)\n continue\n if (self._can_add_new_req(req, lora_ranks) and\n new_batch_total_tokens + req.input_len <= self.batch_max_tokens):\n can_run_list.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n\n if len(can_run_list) != 0:\n new_batch = Batch(uuid.uuid4().hex, can_run_list)\n self.waiting_req_list = [req for req in self.waiting_req_list if req not in can_run_list and req not in abort_list]\n return new_batch\n else:\n return None\n\n\n def next_batch(self):\n next_batch = []\n new_batch_total_tokens = 0\n for req in self.waiting_req_list:\n if req.aborted:\n continue\n if new_batch_total_tokens + req.input_len <= self.batch_max_tokens:\n next_batch.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n if len(next_batch) > 0:\n next_batch = Batch(uuid.uuid4().hex, next_batch)\n return next_batch\n else:\n return None" }, { "identifier": "PEFTReqQueue", "path": "slora/server/router/peft_req_queue.py", "snippet": "class PEFTReqQueue(ReqQueue):\n\n def __init__(self, max_total_tokens, batch_max_tokens, running_max_req_size) -> None:\n super().__init__(max_total_tokens, batch_max_tokens, running_max_req_size)\n \n def append(self, req):\n self.waiting_req_list.append(req)\n return\n \n def _init_cache_list(self, current_batch:Batch, lora_ranks):\n if current_batch is not None:\n self.cache_len_list = []\n self.adapters = set()\n self.adapter_size = 0\n for req in current_batch.reqs:\n self.cache_len_list.append((req.input_len + len(req.output_ids),\n req.max_output_len - len(req.output_ids) - 1))\n if req.adapter_dir not in self.adapters:\n self.adapter_size += lora_ranks[req.adapter_dir] * 4\n self.adapters.add(req.adapter_dir)\n else:\n self.cache_len_list = []\n self.adapters = set()\n self.adapter_size = 0\n \n # @calculate_time(show=True, min_cost_ms=0.1)\n def _can_add_new_req(self, req, lora_ranks):\n self.cache_len_list.append((req.input_len + 1, req.max_output_len - 1)) # hard to analysis\n self.cache_len_list.sort(key=lambda x: -x[1])\n if req.adapter_dir not in self.adapters:\n self.adapter_size += lora_ranks[req.adapter_dir] * 4\n self.adapters.add(req.adapter_dir)\n \n left_out_len_array = np.array([e[1] for e in self.cache_len_list])\n # assert left_out_len_array.min() >= 0\n has_run_len_array = np.array([e[0] for 
e in self.cache_len_list])\n cum_run_len_array = np.cumsum(has_run_len_array)\n size_array = np.arange(1, len(self.cache_len_list) + 1, 1)\n \n need_max_token_num = (left_out_len_array * size_array + cum_run_len_array).max()\n if (need_max_token_num < self.max_total_tokens - self.adapter_size and\n len(self.cache_len_list) <= self.running_max_req_size):\n return True\n else:\n return False\n\n def generate_new_batch(self, current_batch:Batch, lora_ranks: dict[str, int]):\n if current_batch is not None and len(current_batch.reqs) >= self.running_max_req_size:\n return None\n \n self._init_cache_list(current_batch, lora_ranks)\n can_run_list = []\n abort_list = []\n new_batch_total_tokens = 0\n aborted_count = 0\n if len(self.waiting_req_list) > 0 and current_batch is None:\n adapter_dir = self.waiting_req_list[0].adapter_dir\n if current_batch is not None:\n adapter_dir = current_batch.reqs[0].adapter_dir\n # heuristics:\n # TODO: think more\n max_other_waited_reqs = 30\n other_waited_reqs = 0\n for req in self.waiting_req_list:\n if req.adapter_dir != adapter_dir:\n other_waited_reqs += 1\n if other_waited_reqs > max_other_waited_reqs:\n return None\n continue\n if req.adapter_dir == adapter_dir:\n break\n\n for req in self.waiting_req_list:\n if req.adapter_dir != adapter_dir:\n continue\n if req.aborted:\n aborted_count += 1\n continue\n if (self._can_add_new_req(req, lora_ranks) and\n new_batch_total_tokens + req.input_len <= self.batch_max_tokens):\n can_run_list.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n\n if len(can_run_list) != 0:\n new_batch = Batch(uuid.uuid4().hex, can_run_list)\n self.waiting_req_list = [req for req in self.waiting_req_list if req not in can_run_list and req not in abort_list]\n return new_batch\n else:\n return None\n\n\n def next_batch(self):\n next_batch = []\n new_batch_total_tokens = 0\n for req in self.waiting_req_list:\n if req.aborted:\n continue\n if new_batch_total_tokens + req.input_len <= self.batch_max_tokens:\n next_batch.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n if len(next_batch) > 0:\n next_batch = Batch(uuid.uuid4().hex, next_batch)\n return next_batch\n else:\n return None" } ]
import uvloop
import asyncio
import os
import pickle
import time
import torch
import zmq
import zmq.asyncio
import traceback
from typing import Dict, List, Optional
from ..sampling_params import SamplingParams
from ..io_struct import Req, Batch, BatchAbortReq
from .model_infer.model_rpc import start_model_process, ModelRpcClient
from .req_queue import ReqQueue
from rpyc.utils.classic import obtain
from slora.utils.infer_utils import calculate_time
from ..io_struct import BatchTokenIdOut, AbortReq
from .stats import Stats
from slora.server.input_params import InputParams
from slora.models.peft.lora_adapter import get_lora_config
from slora.server.router.profiler import AlphaModel, BetaModel
from slora.server.router.abort_req_queue import AbortReqQueue
from slora.server.router.cluster_req_queue import ClusterReqQueue
from slora.server.router.vtc_req_queue import VTCReqQueue
from slora.server.router.pets_req_queue import PETSReqQueue
from slora.server.router.peft_req_queue import PEFTReqQueue
12,899
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())

def get_scheduler(input_params, adapter_dirs):
    if input_params.scheduler == "vtc_fair":
        return VTCReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens,
                           input_params.running_max_req_size, adapter_dirs, input_params.fair_weights)
    elif input_params.scheduler == "pets":
        return PETSReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens,
                            input_params.running_max_req_size)
    elif input_params.scheduler == "peft":
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())

def get_scheduler(input_params, adapter_dirs):
    if input_params.scheduler == "vtc_fair":
        return VTCReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens,
                           input_params.running_max_req_size, adapter_dirs, input_params.fair_weights)
    elif input_params.scheduler == "pets":
        return PETSReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens,
                            input_params.running_max_req_size)
    elif input_params.scheduler == "peft":
return PEFTReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens,
19
2023-11-05 04:08:36+00:00
16k
fleet-ai/context
cli.py
[ { "identifier": "print_markdown", "path": "utils/utils.py", "snippet": "def print_markdown(message):\n for line in message.split(\"\\n\"):\n line = line.strip()\n if line == \"\":\n print(\"\")\n elif line == \"---\":\n rprint(Rule(style=\"white\"))\n elif line.startswith(\"!!!\"):\n rprint(Text(line[3:], style=\"#D5D7FB\"))\n else:\n rprint(Markdown(line))\n\n if \"\\n\" not in message and message.startswith(\">\"):\n print(\"\")" }, { "identifier": "print_exception", "path": "utils/utils.py", "snippet": "def print_exception(exc_type, exc_value, traceback_obj):\n traceback_details = traceback.extract_tb(traceback_obj)\n for filename, lineno, funcname, text in traceback_details:\n console.print(\n f\"File: {filename}, Line: {lineno}, Func: {funcname}, Text: {text}\"\n )\n console.print(f\"{exc_type.__name__}: {exc_value}\")" }, { "identifier": "extract_code_blocks", "path": "utils/utils.py", "snippet": "def extract_code_blocks(message):\n pattern = r\"```python\\n(.*?)```\"\n matches = re.findall(pattern, message, re.DOTALL)\n return \"\\n\".join(matches)" }, { "identifier": "print_help", "path": "utils/utils.py", "snippet": "def print_help():\n table = Table(show_header=True, header_style=\"bold magenta\")\n table.add_column(\"Command\")\n table.add_column(\"Description\")\n\n # Add rows to the table for each command\n table.add_row(\"-k, --k_value\", \"Number of chunks to return\")\n table.add_row(\n \"-l, --libraries\",\n \"Limit your chat to a list of libraries. Usage: -l library1 library2 library3\",\n )\n table.add_row(\n \"-m, --model\", \"Specify the model. Default: gpt-4-1106-preview (gpt-4-turbo)\"\n )\n table.add_row(\n \"-c, --cite_sources\", \"Determines whether or not the AI model cites its sources\"\n )\n table.add_row(\"-h, --help\", \"Help\")\n\n # Create a panel with the table\n panel = Panel(table, title=\"Help\", border_style=\"blue\")\n\n # Print the panel\n rprint(panel)" }, { "identifier": "TextStream", "path": "utils/stream.py", "snippet": "class TextStream:\n def __init__(self):\n self.live = Live(console=Console(), auto_refresh=False)\n self.live.start()\n\n def print_stream(self, message):\n markdown = Markdown(message.strip() + \"●\")\n panel = Panel(markdown, box=MINIMAL)\n self.live.update(panel)\n self.live.refresh()\n\n def end_stream(self):\n self.live.stop()" }, { "identifier": "retrieve_context", "path": "utils/ai.py", "snippet": "def retrieve_context(query, k=10, filters=None):\n \"\"\"Gets the context from our libraries vector db for a given query.\n\n Args:\n query (str): User input query\n k (int, optional): number of retrieved results. 
Defaults to 10.\n \"\"\"\n\n # First, we query the API\n responses = retrieve(query, k=k, filters=filters)\n\n # Then, we build the prompt_with_context string\n prompt_with_context = \"\"\n for response in responses:\n prompt_with_context += f\"\\n\\n### Context {response['metadata']['url']} ###\\n{response['metadata']['text']}\"\n return {\"role\": \"user\", \"content\": prompt_with_context}" }, { "identifier": "construct_prompt", "path": "utils/ai.py", "snippet": "def construct_prompt(\n messages,\n context_message,\n model=\"gpt-4-1106-preview\",\n cite_sources=True,\n context_window=3000,\n):\n \"\"\"\n Constructs a RAG (Retrieval-Augmented Generation) prompt by balancing the token count of messages and context_message.\n If the total token count exceeds the maximum limit, it adjusts the token count of each to maintain a 1:1 proportion.\n It then combines both lists and returns the result.\n\n Parameters:\n messages (List[dict]): List of messages to be included in the prompt.\n context_message (dict): Context message to be included in the prompt.\n model (str): The model to be used for encoding, default is \"gpt-4-1106-preview\".\n\n Returns:\n List[dict]: The constructed RAG prompt.\n \"\"\"\n # Get the encoding; default to cl100k_base\n if model in OPENAI_MODELS:\n encoding = tiktoken.encoding_for_model(model)\n else:\n encoding = tiktoken.get_encoding(\"cl100k_base\")\n\n # 1) calculate tokens\n reserved_space = 1000\n max_messages_count = int((context_window - reserved_space) / 2)\n max_context_count = int((context_window - reserved_space) / 2)\n\n # 2) construct prompt\n prompts = messages.copy()\n prompts.insert(0, {\"role\": \"system\", \"content\": SYSTEM_PROMPT})\n if cite_sources:\n prompts.insert(-1, {\"role\": \"user\", \"content\": PROMPT})\n\n # 3) find how many tokens each list has\n messages_token_count = len(\n encoding.encode(\n \"\\n\".join(\n [\n f\"<|im_start|>{message['role']}\\n{message['content']}<|im_end|>\"\n for message in prompts\n ]\n )\n )\n )\n context_token_count = len(\n encoding.encode(\n f\"<|im_start|>{context_message['role']}\\n{context_message['content']}<|im_end|>\"\n )\n )\n\n # 4) Balance the token count for each\n if (messages_token_count + context_token_count) > (context_window - reserved_space):\n # context has more than limit, messages has less than limit\n if (messages_token_count < max_messages_count) and (\n context_token_count > max_context_count\n ):\n max_context_count += max_messages_count - messages_token_count\n # messages has more than limit, context has less than limit\n elif (messages_token_count > max_messages_count) and (\n context_token_count < max_context_count\n ):\n max_messages_count += max_context_count - context_token_count\n\n # 5) Cut each list to the max count\n\n # Cut down messages\n while messages_token_count > max_messages_count:\n removed_encoding = encoding.encode(\n f\"<|im_start|>{prompts[1]['role']}\\n{prompts[1]['content']}<|im_end|>\"\n )\n messages_token_count -= len(removed_encoding)\n if messages_token_count < max_messages_count:\n prompts = (\n [prompts[0]]\n + [\n {\n \"role\": prompts[1][\"role\"],\n \"content\": encoding.decode(\n removed_encoding[\n : min(\n int(max_messages_count -\n messages_token_count),\n len(removed_encoding),\n )\n ]\n )\n .replace(\"<|im_start|>\", \"\")\n .replace(\"<|im_end|>\", \"\"),\n }\n ]\n + prompts[2:]\n )\n else:\n prompts = [prompts[0]] + prompts[2:]\n\n # Cut down context\n if context_token_count > max_context_count:\n # Taking a proportion of the content chars 
length\n reduced_chars_length = int(\n len(context_message[\"content\"]) *\n (max_context_count / context_token_count)\n )\n context_message[\"content\"] = context_message[\"content\"][:reduced_chars_length]\n\n # 6) Combine both lists\n prompts.insert(-1, context_message)\n\n return prompts" }, { "identifier": "get_remote_chat_response", "path": "utils/ai.py", "snippet": "def get_remote_chat_response(messages, model=\"gpt-4-1106-preview\"):\n \"\"\"\n Returns a streamed OpenAI chat response.\n\n Parameters:\n messages (List[dict]): List of messages to be included in the prompt.\n model (str): The model to be used for encoding, default is \"gpt-4-1106-preview\".\n\n Returns:\n str: The streamed OpenAI chat response.\n \"\"\"\n client = OpenAI(api_key=os.environ.get(\"OPENAI_API_KEY\"))\n\n try:\n response = client.chat.completions.create(\n model=model, messages=messages, temperature=0.2, stream=True\n )\n\n for chunk in response:\n current_context = chunk.choices[0].delta.content\n yield current_context\n\n except openai.AuthenticationError as error:\n print(\"401 Authentication Error:\", error)\n raise Exception(\n \"Invalid OPENAI_API_KEY. Please re-run with a valid key.\")\n\n except Exception as error:\n print(\"Streaming Error:\", error)\n raise Exception(\"Internal Server Error\")" }, { "identifier": "get_other_chat_response", "path": "utils/ai.py", "snippet": "def get_other_chat_response(messages, model=\"local-model\"):\n \"\"\"\n Returns a streamed chat response from a local server.\n\n Parameters:\n messages (List[dict]): List of messages to be included in the prompt.\n model (str): The model to be used for encoding, default is \"gpt-4-1106-preview\".\n\n Returns:\n str: The streamed chat response.\n \"\"\"\n try:\n if model == \"local-model\":\n url = \"http://localhost:1234/v1/chat/completions\"\n headers = {\"Content-Type\": \"application/json\"}\n data = {\n \"messages\": messages,\n \"temperature\": 0.2,\n \"max_tokens\": -1,\n \"stream\": True,\n }\n response = requests.post(\n url, headers=headers, data=json.dumps(data), stream=True, timeout=120\n )\n\n if response.status_code == 200:\n for chunk in response.iter_content(chunk_size=None):\n decoded_chunk = chunk.decode()\n if (\n \"data:\" in decoded_chunk\n and decoded_chunk.split(\"data:\")[1].strip()\n ): # Check if the chunk is not empty\n try:\n chunk_dict = json.loads(\n decoded_chunk.split(\"data:\")[1].strip()\n )\n yield chunk_dict[\"choices\"][0][\"delta\"].get(\"content\", \"\")\n except json.JSONDecodeError:\n pass\n else:\n print(f\"Error: {response.status_code}, {response.text}\")\n raise Exception(\"Internal Server Error\")\n else:\n if not os.environ.get(\"OPENROUTER_API_KEY\"):\n raise Exception(\n f\"For non-OpenAI models, like {model}, set your OPENROUTER_API_KEY.\"\n )\n\n response = requests.post(\n url=\"https://openrouter.ai/api/v1/chat/completions\",\n headers={\n \"Authorization\": f\"Bearer {os.environ.get('OPENROUTER_API_KEY')}\",\n \"HTTP-Referer\": os.environ.get(\n \"OPENROUTER_APP_URL\", \"https://fleet.so/context\"\n ),\n \"X-Title\": os.environ.get(\"OPENROUTER_APP_TITLE\", \"Fleet Context\"),\n \"Content-Type\": \"application/json\",\n },\n data=json.dumps(\n {\"model\": model, \"messages\": messages, \"stream\": True}),\n stream=True,\n timeout=120,\n )\n if response.status_code == 200:\n for chunk in response.iter_lines():\n decoded_chunk = chunk.decode(\"utf-8\")\n if (\n \"data:\" in decoded_chunk\n and decoded_chunk.split(\"data:\")[1].strip()\n ): # Check if the chunk is not empty\n 
try:\n chunk_dict = json.loads(\n decoded_chunk.split(\"data:\")[1].strip()\n )\n yield chunk_dict[\"choices\"][0][\"delta\"].get(\"content\", \"\")\n except json.JSONDecodeError:\n pass\n else:\n print(f\"Error: {response.status_code}, {response.text}\")\n raise Exception(\"Internal Server Error\")\n\n except requests.exceptions.RequestException as error:\n print(\"Request Error:\", error)\n raise Exception(\n \"Invalid request. Please check your request parameters.\")" }, { "identifier": "ARGUMENTS", "path": "constants/cli.py", "snippet": "ARGUMENTS = [\n {\n \"name\": \"k_value\",\n \"nickname\": \"k\",\n \"help_text\": \"Number of chunks to return\",\n \"type\": int,\n \"default\": 15,\n },\n {\n \"name\": \"libraries\",\n \"nickname\": \"l\",\n \"help_text\": \"Limit your chat to a list of libraries. Usage: -l library1 library2 library3\",\n \"type\": list,\n },\n {\n \"name\": \"model\",\n \"nickname\": \"m\",\n \"help_text\": \"Specify the model. Default: gpt-4\",\n \"type\": str,\n \"default\": \"gpt-4\"\n },\n {\n \"name\": \"cite_sources\",\n \"nickname\": \"c\",\n \"help_text\": \"Determines whether or not the AI model cites its sources\",\n \"type\": bool,\n \"default\": True,\n },\n {\n \"name\": \"local\",\n \"nickname\": \"n\",\n \"help_text\": \"Uses LMStudio for local models\",\n \"type\": bool,\n \"default\": False,\n },\n {\n \"name\": \"context_window\",\n \"nickname\": \"w\",\n \"help_text\": \"Context window (if using local models)\",\n \"type\": int,\n \"default\": 3000,\n },\n]" }, { "identifier": "LIBRARIES", "path": "constants/cli.py", "snippet": "LIBRARIES = [\n \"python\",\n \"boto3\",\n \"urllib3\",\n \"botocore\",\n \"setuptools\",\n \"requests\",\n \"typing-extensions\",\n \"certifi\",\n \"charset-normalizer\",\n \"wheel\",\n \"cryptography\",\n \"python-dateutil\",\n \"idna\",\n \"pyyaml\",\n \"google-api-core\",\n \"six\",\n \"pytz\",\n \"numpy\",\n \"importlib-metadata\",\n \"pip\",\n \"packaging\",\n \"zipp\",\n \"awscli\",\n \"aiobotocore\",\n \"protobuf\",\n \"click\",\n \"pandas\",\n \"pyasn1\",\n \"rsa\",\n \"fsspec\",\n \"pyjwt\",\n \"jmespath\",\n \"markupsafe\",\n \"s3fs\",\n \"attrs\",\n \"cffi\",\n \"psutil\",\n \"lxml\",\n \"pydantic\",\n \"colorama\",\n \"platformdirs\",\n \"googleapis-common-protos\",\n \"pycparser\",\n \"google-auth\",\n \"pyopenssl\",\n \"virtualenv\",\n \"cachetools\",\n \"werkzeug\",\n \"jinja2\",\n \"jsonschema\",\n \"filelock\",\n \"flask\",\n \"sqlalchemy\",\n \"pyparsing\",\n \"docutils\",\n \"async-timeout\",\n \"tzlocal\",\n \"oauthlib\",\n \"pluggy\",\n \"tomli\",\n \"aiohttp\",\n \"grpcio\",\n \"requests-oauthlib\",\n \"pyarrow\",\n \"pytest\",\n \"wrapt\",\n \"tqdm\",\n \"soupsieve\",\n \"dnspython\",\n \"isodate\",\n \"azure-core\",\n \"frozenlist\",\n \"coverage\",\n \"pygments\",\n \"websocket-client\",\n \"beautifulsoup4\",\n \"pillow\",\n \"greenlet\",\n \"importlib-resources\",\n \"distlib\",\n \"yarl\",\n \"multidict\",\n \"scipy\",\n \"decorator\",\n \"aiofiles\",\n \"et-xmlfile\",\n \"openpyxl\",\n \"google-cloud-storage\",\n \"google-cloud-core\",\n \"httptools\",\n \"chardet\",\n \"iniconfig\",\n \"asn1crypto\",\n \"tomlkit\",\n \"tabulate\",\n \"more-itertools\",\n \"requests-toolbelt\",\n \"google-resumable-media\",\n \"paramiko\",\n \"aioconsole\",\n \"deprecated\",\n \"gitpython\",\n \"pynacl\",\n \"google-api-python-client\",\n \"pymysql\",\n \"psycopg2\",\n \"rpds-py\",\n \"proto-plus\",\n \"anyio\",\n \"itsdangerous\",\n \"msal\",\n \"referencing\",\n \"azure-storage-blob\",\n 
\"jsonschema-specifications\",\n \"bcrypt\",\n \"pathspec\",\n \"scikit-learn\",\n \"smmap\",\n \"msgpack\",\n \"matplotlib\",\n \"poetry-core\",\n \"keyring\",\n \"joblib\",\n \"regex\",\n \"mypy-extensions\",\n \"wcwidth\",\n \"docker\",\n \"sniffio\",\n \"google-auth-oauthlib\",\n \"kiwisolver\",\n \"portalocker\",\n \"pexpect\",\n \"ptyprocess\",\n \"jaraco-classes\",\n \"dill\",\n \"pyrsistent\",\n \"ruamel-yaml\",\n \"gitdb\",\n \"pycryptodomex\",\n \"sqlparse\",\n \"msrest\",\n \"google-crc32c\",\n \"sagemaker\",\n \"tenacity\",\n \"prompt-toolkit\",\n \"google-cloud-bigquery\",\n \"tzdata\",\n \"snowflake-connector-python\",\n \"gunicorn\",\n \"cython\",\n \"py4j\",\n \"py\",\n \"markdown\",\n \"azure-identity\",\n \"httplib2\",\n \"future\",\n \"fonttools\",\n \"alembic\",\n \"markdown-it-py\",\n \"cachecontrol\",\n \"awswrangler\",\n \"rich\",\n \"msal-extensions\",\n \"tornado\",\n \"threadpoolctl\",\n \"jedi\",\n \"marshmallow\",\n \"google-auth-httplib2\",\n \"traitlets\",\n \"cloudpickle\",\n \"shellingham\",\n \"redis\",\n \"pycodestyle\",\n \"backoff\",\n \"python-dotenv\",\n \"scramp\",\n \"toml\",\n \"h11\",\n \"pytest-cov\",\n \"termcolor\",\n \"trove-classifiers\",\n \"annotated-types\",\n \"uritemplate\",\n \"ipython\",\n \"pyzmq\",\n \"networkx\",\n \"xmltodict\",\n \"uvicorn\",\n \"pyspark\",\n \"pg8000\",\n \"mccabe\",\n \"ply\",\n \"prometheus-client\",\n \"prometheus-python\",\n \"redshift-connector\",\n \"oscrypto\",\n \"dulwich\",\n \"webencodings\",\n \"pyodbc\",\n \"pycryptodome\",\n \"httpx\",\n \"sortedcontainers\",\n \"httpcore\",\n \"jeepney\",\n \"mako\",\n \"babel\",\n \"poetry\",\n \"secretstorage\",\n \"defusedxml\",\n \"isort\",\n \"jsonpointer\",\n \"blinker\",\n \"black\",\n \"jupyter-client\",\n \"typing-inspect\",\n \"jupyter-core\",\n \"pymongo\",\n \"mdit-py-plugins\",\n \"datadog\",\n \"contourpy\",\n \"adal\",\n \"pkginfo\",\n \"parso\",\n \"tensorboard\",\n \"toolz\",\n \"pyflakes\",\n \"absl-py\",\n \"sentry-sdk\",\n \"xlrd\",\n \"requests-aws4auth\",\n \"flake8\",\n \"jsonpath-ng\",\n \"python-json-logger\",\n \"nbconvert\",\n \"pickleshare\",\n \"build\",\n \"mdurl\",\n \"backcall\",\n \"fastapi\",\n \"rapidfuzz\",\n \"argcomplete\",\n \"python-utils\",\n \"transformers\",\n \"matplotlib-inline\",\n \"setuptools-scm\",\n \"nbformat\",\n \"ipykernel\",\n \"databricks-cli\",\n \"notebook\",\n \"fastjsonschema\",\n \"jupyter-server\",\n \"mistune\",\n \"huggingface-hub\",\n \"kubernetes\",\n \"debugpy\",\n \"starlette\",\n \"arrow\",\n \"asttokens\",\n \"progressbar2\",\n \"tensorflow\",\n \"google-cloud-pubsub\",\n \"websockets\",\n \"astroid\",\n \"jsonpatch\",\n \"asynctest\",\n \"aioitertools\",\n \"imageio\",\n \"simplejson\",\n \"appdirs\",\n \"pyproject-hooks\",\n \"pylint\",\n \"pbr\",\n \"lazy-object-proxy\",\n \"multiprocess\",\n \"smart-open\",\n \"altair\",\n \"h5py\",\n \"asgiref\",\n \"backports-zoneinfo\",\n \"tinycss2\",\n \"entrypoints\",\n \"bleach\",\n \"oauth2client\",\n \"llvmlite\",\n \"numba\",\n \"cattrs\",\n \"crashtest\",\n \"mlflow\",\n \"send2trash\",\n \"shapely\",\n \"elasticsearch\",\n \"comm\",\n \"cleo\",\n \"orjson\",\n \"pendulum\",\n \"pytest-runner\",\n \"nbclient\",\n \"aenum\",\n \"pygithub\",\n \"identify\",\n \"msrestazure\",\n \"nodeenv\",\n \"mypy\",\n \"flatbuffers\",\n \"great-expectations\",\n \"mock\",\n \"jupyterlab-server\",\n \"zope-interface\",\n \"pytzdata\",\n \"loguru\",\n \"argon2-cffi\",\n \"tokenizers\",\n \"typeguard\",\n \"overrides\",\n \"tox\",\n \"requests-file\",\n 
\"humanfriendly\",\n \"json5\",\n \"xlsxwriter\",\n \"pysocks\",\n \"google-pasta\",\n \"cfgv\",\n \"pyathena\",\n \"gast\",\n \"azure-storage-file-datalake\",\n \"ipywidgets\",\n \"rfc3339-validator\",\n \"executing\",\n \"jupyterlab\",\n \"pre-commit\",\n \"django\",\n \"querystring-parser\",\n \"contextlib2\",\n \"cached-property\",\n \"installer\",\n \"deepdiff\",\n \"pure-eval\",\n \"tensorflow-serving-api\",\n \"nltk\",\n \"semver\",\n \"retry\",\n \"hvac\",\n \"pipenv\",\n \"uri-template\",\n \"torch\",\n \"execnet\",\n \"html5lib\",\n \"typer\",\n \"croniter\",\n \"lockfile\",\n \"slack-sdk\",\n \"watchdog\",\n \"dataclasses\",\n \"gremlinpython\",\n \"types-pyyaml\",\n \"tensorflow-io-gcs-filesystem\",\n \"setproctitle\",\n \"azure-mgmt-core\",\n \"responses\",\n \"sphinx\",\n \"statsmodels\",\n \"text-unidecode\",\n \"dataclasses-json\",\n \"pandocfilters\",\n \"pytest-xdist\",\n \"async-lru\",\n \"click-plugins\",\n \"opentelemetry-api\",\n \"selenium\",\n \"safetensors\",\n \"opencv-python\",\n \"python-slugify\",\n \"xgboost\",\n \"distro\",\n \"plotly\",\n \"sentencepiece\",\n \"webcolors\",\n \"types-requests\",\n \"rfc3986\",\n \"terminado\",\n \"jupyter-lsp\",\n \"rfc3986-validator\",\n \"configparser\",\n \"argon2-cffi-bindings\",\n \"cmake\",\n \"fastavro\",\n \"docopt\",\n \"unidecode\",\n \"retrying\",\n \"types-urllib3\",\n \"apache-airflow\",\n \"pytest-mock\",\n \"fqdn\",\n \"isoduration\",\n \"tblib\",\n \"prettytable\",\n \"semantic-version\",\n \"sympy\",\n \"seaborn\",\n \"confluent-kafka\",\n \"azure-keyvault-secrets\",\n \"opt-einsum\",\n \"faker\",\n \"jsonpickle\",\n \"mpmath\",\n \"patsy\",\n \"azure-mgmt-resource\",\n \"libclang\",\n \"opencensus\",\n \"antlr4-python3-runtime\",\n \"pysftp\",\n \"ordered-set\",\n \"pymssql\",\n \"db-dtypes\",\n \"astunparse\",\n \"inflection\",\n \"gcsfs\",\n \"thrift\",\n \"parsedatetime\",\n \"dask\",\n \"deprecation\",\n \"scikit-image\",\n \"azure-datalake-store\",\n \"moto\",\n \"zeep\",\n \"makefun\",\n \"pyhcl\",\n \"boto\",\n \"libcst\",\n \"graphviz\",\n \"stevedore\",\n \"gspread\",\n \"snowballstemmer\",\n \"ujson\",\n \"zope-event\",\n \"gevent\",\n \"pyproj\",\n \"checkov\",\n \"python-gnupg\",\n \"pathos\",\n \"trio\",\n \"trio-websocket\",\n \"azure-eventhub\",\n \"typed-ast\",\n \"kombu\",\n \"shap\",\n \"pox\",\n \"ppft\",\n \"datasets\",\n \"apscheduler\",\n \"torchvision\",\n \"click-man\",\n \"accelerate\",\n \"coloredlogs\",\n \"xxhash\",\n \"brotli\",\n \"mypy-boto3-rds\",\n \"docstring-parser\",\n \"applicationinsights\",\n \"apache-beam\",\n \"structlog\",\n \"tldextract\",\n \"lightgbm\",\n \"email-validator\",\n \"wandb\",\n \"cligj\",\n \"kafka-python\",\n \"pybind11\",\n \"fire\",\n \"celery\",\n \"wsproto\",\n \"pywavelets\",\n \"numexpr\",\n \"authlib\",\n \"datetime\",\n \"colorlog\",\n \"pathlib2\",\n \"uamqp\",\n \"texttable\",\n \"pytest-asyncio\",\n \"google-cloud-logging\",\n \"azure-cosmos\",\n \"delta-spark\",\n \"ecdsa\",\n \"nvidia-cudnn-cu11\",\n \"enum34\",\n \"flask-cors\",\n \"slicer\",\n \"spacy\",\n \"fiona\",\n \"python-jose\",\n \"watchtower\",\n \"unicodecsv\",\n \"imagesize\",\n \"schema\",\n \"alabaster\",\n \"kfp\",\n \"geopandas\",\n \"marshmallow-enum\",\n \"apache-airflow-providers-common-sql\",\n \"pyfunctional\",\n \"dbt-core\",\n \"validators\",\n \"keras-preprocessing\",\n \"holidays\",\n \"python-daemon\",\n \"readme-renderer\",\n \"djangorestframework\",\n \"pandas-gbq\",\n \"azure-storage-queue\",\n \"azure-servicebus\",\n \"hypothesis\",\n \"tifffile\",\n 
\"sshtunnel\",\n \"graphframes\",\n \"lz4\",\n \"kfp-server-api\",\n \"python-magic\",\n \"invoke\",\n \"avro-python3\",\n \"parse\",\n \"kfp-pipeline-spec\",\n \"freezegun\",\n \"constructs\",\n \"outcome\",\n \"python-multipart\",\n \"billiard\",\n \"monotonic\",\n \"pip-tools\",\n \"vine\",\n \"fasteners\",\n \"ddtrace\",\n \"databricks-sql-connector\",\n \"pycountry\",\n \"azure-keyvault-keys\",\n \"sendgrid\",\n \"click-repl\",\n \"srsly\",\n \"pika\",\n \"chex\",\n \"thinc\",\n \"ijson\",\n \"jira\",\n \"docker-pycreds\",\n \"hpack\",\n \"opencv-python-headless\",\n \"blis\",\n \"flask-sqlalchemy\",\n \"fuzzywuzzy\",\n \"xlwt\",\n \"imbalanced-learn\",\n \"qtconsole\",\n \"pydata-google-auth\",\n \"h2\",\n \"pyproject-api\",\n \"sh\",\n \"lit\",\n \"hyperframe\",\n \"stringcase\",\n \"astor\",\n \"langchain-guides\",\n \"langchain\",\n \"wasabi\",\n \"pytest-metadata\",\n \"bitarray\",\n \"pathtools\",\n \"catalogue\",\n \"nose\",\n \"yapf\",\n \"distributed\",\n \"amqp\",\n \"pathy\",\n \"qtpy\",\n \"types-pytz\",\n \"boto3-stubs\",\n \"triton\",\n \"office365-rest-python-client\",\n \"hatchling\",\n \"jupyter-console\",\n \"slackclient\",\n \"atomicwrites\",\n \"starkbank-ecdsa\",\n \"omegaconf\",\n \"editables\",\n \"uvloop\",\n \"humanize\",\n \"knack\",\n \"botocore-stubs\",\n \"iso8601\",\n \"smdebug-rulesconfig\",\n \"crcmod\",\n \"torchmetrics\",\n \"fastparquet\",\n \"python-levenshtein\",\n \"pytimeparse\",\n \"mypy-boto3-s3\",\n \"einops\",\n \"pywin32\",\n \"jpype1\",\n \"pydeequ\",\n \"cog\",\n \"azure-cli\",\n \"pymeeus\",\n \"types-six\",\n \"murmurhash\",\n \"ansible\",\n \"pyspnego\",\n \"inflect\",\n \"phonenumbers\",\n \"flask-wtf\",\n \"cymem\",\n \"preshed\",\n \"cdk-nag\",\n \"aws-requests-auth\",\n \"google-cloud-audit-log\",\n \"ua-parser\",\n \"jsondiff\",\n \"yamllint\",\n \"nbclassic\",\n \"cerberus\",\n \"lazy-loader\",\n \"dacite\",\n \"statsd\",\n \"cssselect\",\n \"dpath\",\n \"apispec\",\n \"gensim\",\n \"django-cors-headers\",\n \"ruff\",\n \"gradio\",\n \"convertdate\",\n \"scp\",\n \"geopy\",\n \"sqlalchemy-utils\",\n \"azure-data-tables\",\n \"pypdf2\",\n \"partd\",\n \"graphql-core\",\n \"python-gitlab\",\n \"ninja\",\n \"ratelimit\",\n \"junit-xml\",\n \"levenshtein\",\n \"fabric\",\n \"pydot\",\n \"azure-storage-file-share\",\n \"pytorch-lightning\",\n \"watchfiles\",\n \"types-setuptools\",\n \"requests-mock\",\n \"strip-hints\",\n \"keras-applications\",\n \"pyotp\",\n \"mashumaro\",\n \"apache-airflow-providers-http\",\n \"ipaddress\",\n \"timm\",\n \"click-didyoumean\",\n \"bytecode\",\n \"parameterized\",\n \"netaddr\",\n \"flask-appbuilder\",\n \"pyperclip\",\n \"openapi-spec-validator\",\n \"onnx\",\n \"marshmallow-sqlalchemy\",\n \"locket\",\n \"lark\",\n \"mysqlclient\",\n \"confection\",\n \"pytest-html\",\n \"azure-cosmosdb-table\",\n \"agate\",\n \"geographiclib\",\n \"types-paramiko\",\n \"pytest-rerunfailures\",\n \"pyserial\",\n \"spacy-loggers\",\n \"flask-login\",\n \"flask-jwt-extended\",\n \"azure-devops\",\n \"xarray\",\n \"spark-nlp\",\n \"dateparser\",\n \"onnxruntime\",\n \"twisted\",\n \"lightning-utilities\",\n \"wtforms\",\n \"jaydebeapi\",\n \"bokeh\",\n \"natsort\",\n \"google-cloud-bigtable\",\n \"grpcio-health-checking\",\n \"tensorflow-text\",\n \"twine\",\n \"commonmark\",\n \"grpcio-reflection\",\n \"flask-caching\",\n \"cron-descriptor\",\n \"pyaml\",\n \"geoip2\",\n \"nh3\",\n \"autopep8\",\n \"python-editor\",\n \"logbook\",\n \"ftfy\",\n \"cachelib\",\n \"datadog-api-client\",\n \"jupyter\",\n \"hologram\",\n 
\"protobuf3-to-dict\",\n \"ndg-httpsclient\",\n \"promise\",\n \"azureml-core\",\n \"pydub\",\n \"jax\",\n \"flit-core\",\n \"zstandard\",\n \"cssselect2\",\n \"minimal-snowplow-tracker\",\n \"dbt-extractor\",\n \"connexion\",\n \"azure-keyvault-certificates\",\n \"configargparse\",\n \"aniso8601\",\n \"cairocffi\",\n \"hyperlink\",\n \"cramjam\",\n \"elasticsearch-dsl\",\n \"mypy-boto3-redshift-data\",\n \"h3\",\n \"cairosvg\",\n \"maxminddb\",\n \"pytz-deprecation-shim\",\n \"reportlab\",\n \"langcodes\",\n \"pytest-forked\",\n \"pymupdf\",\n \"ansible-core\",\n \"cloudevents\",\n \"leather\",\n \"ddsketch\",\n \"jaxlib\",\n \"oldest-supported-numpy\",\n \"tiktoken\",\n \"supervisor\",\n \"diskcache\",\n \"functions-framework\",\n \"hdfs\",\n \"apache-airflow-providers-ssh\",\n \"gradio-client\",\n \"azure-multiapi-storage\",\n \"funcsigs\",\n \"azure-kusto-data\",\n \"envier\",\n \"pyhive\",\n \"types-protobuf\",\n \"django-filter\",\n \"elastic-transport\",\n \"parse-type\",\n \"types-python-dateutil\",\n \"boltons\",\n \"python-docx\",\n \"twilio\",\n \"twilio-python\",\n \"pgpy\",\n \"korean-lunar-calendar\",\n \"azure-eventgrid\",\n \"async-generator\",\n \"globus-sdk\",\n \"apache-airflow-providers-imap\",\n \"sentence-transformers\",\n \"mkdocs-material\",\n \"aws-xray-sdk\",\n \"resolvelib\",\n \"linkify-it-py\",\n \"setuptools-rust\",\n \"google\",\n \"terminaltables\",\n \"keystoneauth1\",\n \"apache-airflow-providers-ftp\",\n \"javaproperties\",\n \"sqlalchemy-redshift\",\n \"jdcal\",\n \"pep517\",\n \"incremental\",\n \"limits\",\n \"unittest-xml-reporting\",\n \"frozendict\",\n \"service-identity\",\n \"factory-boy\",\n \"ml-dtypes\",\n \"addict\",\n \"uc-micro-py\",\n \"shortuuid\",\n \"pypandoc\",\n \"blessed\",\n \"cx-oracle\",\n \"requests-ntlm\",\n \"django-extensions\",\n \"apache-airflow-providers-amazon\",\n \"python-keystoneclient\",\n \"bracex\",\n \"cmdstanpy\",\n \"apache-airflow-providers-sqlite\",\n \"cookiecutter\",\n \"types-cryptography\",\n \"flask-session\",\n \"timezonefinder\",\n \"magicattr\",\n \"pymsteams\",\n \"pylint-plugin-utils\",\n \"voluptuous\",\n \"langsmith\",\n \"cinemagoer\",\n \"passlib\",\n \"imdbpy\",\n \"emoji\",\n \"databricks-api\",\n \"configobj\",\n \"bandit\",\n \"ultralytics\",\n \"w3lib\",\n \"dirac\",\n \"backports-functools-lru-cache\",\n \"tableauserverclient\",\n \"automat\",\n \"pypika\",\n \"pydash\",\n \"py-cpuinfo\",\n \"mmh3\",\n \"tokenize-rt\",\n \"python-swiftclient\",\n \"tensorflow-hub\",\n \"librosa\",\n \"webdriver-manager\",\n \"constantly\",\n \"user-agents\",\n \"injector\",\n \"youtube-dl\",\n \"pdfminer-six\",\n \"markdown2\",\n \"ffmpy\",\n \"mergedeep\",\n \"netifaces\",\n \"databricks-sdk\",\n \"azure-keyvault-administration\",\n \"ephem\",\n \"flax\",\n \"urllib3-secure-extra\",\n \"looker-sdk\",\n \"kornia\",\n \"python3-openid\",\n \"userpath\",\n \"polars\",\n \"tensorboardx\",\n \"openapi-schema-validator\",\n \"jellyfish\",\n \"ray\",\n \"django-storages\",\n \"asyncpg\",\n \"dynamodb-json\",\n \"pycocotools\",\n \"lunarcalendar\",\n \"types-redis\",\n \"dm-tree\",\n \"flask-limiter\",\n \"scapy\",\n \"sacremoses\",\n \"hiredis\",\n \"netcdf4\",\n \"pyhocon\",\n \"cmaes\",\n \"feedparser\",\n \"firebase-admin\",\n \"yacs\",\n \"prison\",\n \"pytest-localserver\",\n \"polling2\",\n \"flask-babel\",\n \"influxdb\",\n \"binaryornot\",\n \"psycopg3\",\n \"sarif-om\",\n \"jschema-to-python\",\n \"cfn-flip\",\n \"google-apitools\",\n \"ipdb\",\n \"pyrfc3339\",\n \"filterpy\",\n \"py-spy\",\n \"wcmatch\",\n 
\"launchdarkly-server-sdk\",\n \"pyelftools\",\n \"logging-azure-rest\",\n \"python-jenkins\",\n \"apache-airflow-providers-cncf-kubernetes\",\n \"google-ads\",\n \"clickclick\",\n \"streamlit\",\n \"pylint-django\",\n \"yq\",\n \"findspark\",\n \"pycares\",\n \"mkdocs\",\n \"pytimeparse2\",\n \"ldap3\",\n \"pyee\",\n \"pydocstyle\",\n \"catboost\",\n \"sqlalchemy-jsonfield\",\n \"optuna\",\n \"aws-lambda-powertools\",\n \"lru-dict\",\n \"rasterio\",\n \"cartoframes\",\n \"carto\",\n \"aiodns\",\n \"pyrestcli\",\n \"opentracing\",\n \"tensorflow-datasets\",\n \"apache-airflow-providers-google\",\n \"jsonlines\",\n \"azure\",\n \"backports-weakref\",\n \"diff-cover\",\n \"cftime\",\n \"azure-kusto-ingest\",\n \"qrcode\",\n \"redis-py-cluster\",\n \"diffusers\",\n \"grpclib\",\n \"pypdf\",\n \"thrift-sasl\",\n \"django-debug-toolbar\",\n \"dynaconf\",\n \"django-redis\",\n \"salesforce-bulk\",\n \"kazoo\",\n \"configupdater\",\n \"comtypes\",\n \"langdetect\",\n \"hydra-core\",\n \"pytest-django\",\n \"pywin32-ctypes\",\n \"pyminizip\",\n \"pathvalidate\",\n \"google-re2\",\n \"idna-ssl\",\n \"dagster-pandas\",\n \"toposort\",\n \"expiringdict\",\n \"rdflib\",\n \"etils\",\n \"rich-argparse\",\n \"xyzservices\",\n \"bottle\",\n \"oslo-utils\",\n \"prophet\",\n \"pdfplumber\",\n \"azure-mgmt-subscription\",\n \"parsl\",\n \"jsii\",\n \"click-option-group\",\n \"analytics-python\",\n \"home-run\",\n \"funcx\",\n \"funcx-common\",\n \"lmdb\",\n \"zict\",\n \"multi-key-dict\",\n \"hatch-fancy-pypi-readme\",\n \"googlemaps\",\n \"pyudev\",\n \"atlassian-python-api\",\n \"dohq-artifactory\",\n \"oslo-i18n\",\n \"whitenoise\",\n \"aiosqlite\",\n \"python-engineio\",\n \"enum-compat\",\n \"affine\",\n \"fs\",\n \"flake8-bugbear\",\n \"hyperopt\",\n \"multipledispatch\",\n \"oslo-serialization\",\n \"pygeohash\",\n \"somnium\",\n \"kaleido\",\n \"python-snappy\",\n \"python-pptx\",\n \"gql\",\n \"pymdown-extensions\",\n \"wexpect\",\n \"types-pyopenssl\",\n \"foundationdb\",\n \"jsonschema-spec\",\n \"iopath\",\n \"snuggs\",\n \"strict-rfc3339\",\n \"tablib\",\n \"orderedmultidict\",\n \"sqlglot\",\n \"fakeredis\",\n \"pystan\",\n \"python-socketio\",\n \"robotframework\",\n \"pkgconfig\",\n \"pycairo\",\n \"python-consul\",\n \"curlify\",\n \"types-toml\",\n \"backports-tempfile\",\n \"multimethod\",\n \"pynamodb\",\n \"docker-compose\",\n \"munch\",\n \"torchaudio\",\n \"elementpath\",\n \"mypy-boto3-lambda\",\n \"python-decouple\",\n \"mypy-boto3-dynamodb\",\n \"pylev\",\n \"pmdarima\",\n \"drf-yasg\",\n \"path\",\n \"pyxlsb\",\n \"pandasql\",\n \"pipdeptree\",\n \"debtcollector\",\n \"nvidia-ml-py\",\n \"pyinstaller-hooks-contrib\",\n \"dvclive\",\n \"koalas\",\n \"arviz\",\n \"coreapi\",\n \"sqlalchemy-bigquery\",\n \"pyquery\",\n \"webob\",\n \"faiss-cpu\",\n \"flower\",\n \"cloudformation-cli\",\n \"azureml-dataset-runtime\",\n \"azure-mgmt\",\n \"cloudformation-cli-java-plugin\",\n \"pyinstaller\",\n \"python-box\",\n \"pympler\",\n \"mypy-boto3-secretsmanager\",\n \"marshmallow-oneofschema\",\n \"schedule\",\n \"resampy\",\n \"bitstring\",\n \"timeout-decorator\",\n \"furl\",\n \"bidict\",\n \"setuptools-git\",\n \"jsonmerge\",\n \"htmlmin\",\n \"plumbum\",\n \"gdown\",\n \"evergreen-py\",\n \"tableauhyperapi\",\n \"xformers\",\n \"yt-dlp\",\n \"waitress\",\n \"mypy-boto3-cloudformation\",\n \"tld\",\n \"pipx\",\n \"fake-useragent\",\n \"junitparser\",\n \"pylint-flask\",\n \"jaraco-functools\",\n \"geomet\",\n \"yappi\",\n \"flask-openid\",\n \"apache-airflow-providers-snowflake\",\n 
\"ciso8601\",\n \"paho-mqtt\",\n \"aiohttp-retry\",\n \"smbprotocol\",\n \"mypy-protobuf\",\n \"msgpack-python\",\n \"dockerpty\",\n \"cssutils\",\n \"djangorestframework-simplejwt\",\n \"wordcloud\",\n \"pytest-env\",\n \"django-environ\",\n \"s3cmd\",\n \"graphene\",\n \"soundfile\",\n \"html2text\",\n \"dagster-dbt\",\n \"apache-airflow-providers-databricks\",\n \"python-nvd3\",\n \"pygobject\",\n \"azureml-sdk\",\n \"click-default-group\",\n \"azureml-dataprep\",\n \"pygit2\",\n \"boto3-type-annotations\",\n \"imagehash\",\n \"ec2-metadata\",\n \"requests-futures\",\n \"rx\",\n \"geventhttpclient\",\n \"wget\",\n \"xmlschema\",\n \"python-rapidjson\",\n \"playwright\",\n \"flatten-json\",\n \"collections-extended\",\n \"myst-parser\",\n \"flask-restful\",\n \"facebook-business\",\n \"pdpyras\",\n \"python-crfsuite\",\n \"pydeck\",\n \"dash-core-components\",\n \"publication\",\n \"zthreading\",\n \"cheroot\",\n \"minio\",\n \"uwsgi\",\n \"portpicker\",\n \"simplegeneric\",\n \"python-crontab\",\n \"basicsr\",\n \"facexlib\",\n \"testpath\",\n \"json-log-formatter\",\n \"ghp-import\",\n \"sseclient-py\",\n \"ansi2html\",\n \"jproperties\",\n \"django-timezone-field\",\n \"duckdb\",\n \"pygsheets\",\n \"pyzstd\",\n \"opencv-contrib-python\",\n \"pyyaml-env-tag\",\n \"pyaes\",\n \"pooch\",\n \"funcy\",\n \"appnope\",\n \"cerberus-python-client\",\n \"realesrgan\",\n \"readchar\",\n \"cassandra-driver\",\n \"requests-unixsocket\",\n \"pyproject-metadata\",\n \"dictdiffer\",\n \"pypng\",\n \"ffmpeg-python\",\n \"locust\",\n \"pymc\",\n \"modelx\",\n \"ffn\",\n \"finance-py\",\n \"gs-quant\",\n \"tf-quant-finance\",\n \"finta\",\n \"qstrader\",\n \"blankly\",\n \"ta-lib-python\",\n \"zipline\",\n \"bt\",\n \"backtrader\",\n \"pyalgotrade\",\n \"pandas-ta\",\n \"ta\",\n \"finmarket-py\",\n \"zvt\",\n \"py-portfolio-opt\",\n \"eiten\",\n \"backtesting-py\",\n \"quantstats\",\n \"qtpylib\",\n \"freqtrade\",\n \"qlib\",\n \"jesse\",\n \"finrl\",\n \"bulbea\",\n \"octobot\",\n \"tda-api\",\n \"vectorbt\",\n \"lean\",\n \"pybroker\",\n \"pyfolio\",\n \"empyrical\",\n \"finquant\",\n \"riskfolio-lib\",\n \"alphalens\",\n \"arch\",\n \"pyflux\",\n \"tsfresh\",\n \"gluonts\",\n \"yfinance\",\n \"alpha-vantage\",\n \"pandas-datareader\",\n \"yahoo-finance\",\n \"findatapy\",\n \"wallstreet\",\n \"alpaca-trade-api-python\",\n \"investpy\",\n \"xlwings\",\n \"dtale\",\n \"mplfinance\",\n \"keras\",\n \"opensearch-py\",\n \"openai\",\n \"dash\",\n \"stripe\",\n]" }, { "identifier": "OPENAI_MODELS", "path": "constants/cli.py", "snippet": "OPENAI_MODELS = [\n \"gpt-4-1106-preview\",\n \"gpt-4\",\n \"gpt-3.5-turbo\",\n \"gpt-3.5-turbo-16k\",\n]" }, { "identifier": "MODELS_TO_TOKENS", "path": "constants/ai.py", "snippet": "MODELS_TO_TOKENS = {\n \"gpt-4\": 8192,\n \"gpt-4-1106-preview\": 128000,\n \"gpt-4-32k\": 32768,\n \"gpt-3.5-turbo\": 4097,\n \"gpt-3.5-turbo-16k\": 16385,\n}" } ]
import os import openai import sys import argparse import traceback from getpass import getpass from rich import print as rprint from utils.utils import print_markdown, print_exception, extract_code_blocks, print_help from utils.stream import TextStream from utils.ai import ( retrieve_context, construct_prompt, get_remote_chat_response, get_other_chat_response, ) from constants.cli import ARGUMENTS, LIBRARIES, OPENAI_MODELS from constants.ai import MODELS_TO_TOKENS
13,164
model = "local-model" print_markdown( f"""--- **You are using a local model.** We're working with LM Studio to provide access to local models for you. Download and start your model to get started. Instructions: 1. Download LM Studio. You can find the download link here: https://lmstudio.ai 2. Open LM Studio and download your model of choice. 3. Click the **↔ icon** on the very left sidebar 4. Select your model and click "Start Server" Note that your context window is set to {context_window}. To change this, run `context --context_window <context window>`. ---""" ) else: openrouter_key = os.environ.get("OPENROUTER_API_KEY") openai_key = os.environ.get("OPENAI_API_KEY") # Get the OpenAI API key, if not found if model in OPENAI_MODELS and not openai_key: print_markdown( """--- !!!**OpenAI API key not found.** Please provide a key to proceed. --- """ ) openai_key = getpass("OpenAI API key: ") os.environ["OPENAI_API_KEY"] = openai_key print_markdown( """ --- **Tip**: To save this key for later, run `export OPENAI_API_KEY=<your key>` on mac/linux or `setx OPENAI_API_KEY <your key>` on windows. For non-OpenAI models, you should set `OPENROUTER_API_KEY`, and optionally `OPENROUTER_APP_URL` and `OPENROUTER_APP_TITLE`. ---""" ) # Otherwise, grab the openrouter key, if not found elif model not in OPENAI_MODELS and not openrouter_key: print_markdown( """--- !!!**OpenRouter API key not found.** Please provide a key to proceed. --- """ ) api_key = getpass("OpenRouter API key: ") os.environ["OPENROUTER_API_KEY"] = api_key print_markdown( f""" --- **Tip**: To save this key for later, run `export OPENROUTER_API_KEY=<your key>` on mac/linux or `setx OPENROUTER_API_KEY <your key>` on windows. You can optionally set `OPENROUTER_APP_URL` and `OPENROUTER_APP_TITLE`, too. Note that your context window is set to {context_window}. To change this, run `context --context_window <context window>`. ---""" ) if model == "gpt-4-1106-preview": print_markdown( """!!!Welcome to Fleet Context! Generate and run code using the most up-to-date libraries. *Warning*: You are using gpt-4-turbo, which is not yet stable and is rate limited at 100 requests per day. Please use with caution. """ ) else: print_markdown( """!!!Welcome to Fleet Context! Generate and run code using the most up-to-date libraries. """ ) messages = [] while True: try: query = input("> ") query = query.strip() if not query: continue if query.lower() == "exit": rprint("Exiting. Goodbye!") break messages.append({"role": "user", "content": query}) rag_context = retrieve_context(query, k=k, filters=filters) prompts = construct_prompt( messages, rag_context, model=model, cite_sources=cite_sources, context_window=context_window, ) full_response = "" try: streamer = TextStream() if model in OPENAI_MODELS: for response in get_remote_chat_response(prompts, model=model): if response: full_response += response streamer.print_stream(full_response) else:
# pylint: disable=E0401 # pylint: disable=W0122 # pylint: disable=W0718 def main(): parser = argparse.ArgumentParser(description="Fleet Data Retriever", add_help=False) parser.add_argument("help", nargs="?", default=argparse.SUPPRESS) # Add arguments for arg in ARGUMENTS: if arg["type"] == bool: default = arg["default"] if "default" in arg else None parser.add_argument( f'-{arg["nickname"]}', f'--{arg["name"]}', dest=arg["name"], help=arg["help_text"], action="store_true", default=default, ) elif arg["type"] == list: choices = arg["choices"] if "choices" in arg else None default = arg["default"] if "default" in arg else None parser.add_argument( f'-{arg["nickname"]}', f'--{arg["name"]}', dest=arg["name"], help=arg["help_text"], type=str, nargs="+", choices=choices, default=default, ) else: choices = arg["choices"] if "choices" in arg else None default = arg["default"] if "default" in arg else None parser.add_argument( f'-{arg["nickname"]}', f'--{arg["name"]}', dest=arg["name"], help=arg["help_text"], type=arg["type"], choices=choices, default=default, ) # Hit the retrieve endpoint args = parser.parse_args() k = args.k_value model = args.model cite_sources = args.cite_sources filters = {} if getattr(args, "help", None) is not None: print_help() return # If library specified, match library name to uuid if args.libraries: for library in args.libraries: if library not in LIBRARIES: rprint( "Library not found. Please refer to the list of available libraries." ) return filters["library_name"] = args.libraries # Get context window if model in OPENAI_MODELS: context_window = MODELS_TO_TOKENS[model] else: context_window = args.context_window # If local model requested, use LMStudio api_key = "" if args.local: model = "local-model" print_markdown( f"""--- **You are using a local model.** We're working with LM Studio to provide access to local models for you. Download and start your model to get started. Instructions: 1. Download LM Studio. You can find the download link here: https://lmstudio.ai 2. Open LM Studio and download your model of choice. 3. Click the **↔ icon** on the very left sidebar 4. Select your model and click "Start Server" Note that your context window is set to {context_window}. To change this, run `context --context_window <context window>`. ---""" ) else: openrouter_key = os.environ.get("OPENROUTER_API_KEY") openai_key = os.environ.get("OPENAI_API_KEY") # Get the OpenAI API key, if not found if model in OPENAI_MODELS and not openai_key: print_markdown( """--- !!!**OpenAI API key not found.** Please provide a key to proceed. --- """ ) openai_key = getpass("OpenAI API key: ") os.environ["OPENAI_API_KEY"] = openai_key print_markdown( """ --- **Tip**: To save this key for later, run `export OPENAI_API_KEY=<your key>` on mac/linux or `setx OPENAI_API_KEY <your key>` on windows. For non-OpenAI models, you should set `OPENROUTER_API_KEY`, and optionally `OPENROUTER_APP_URL` and `OPENROUTER_APP_TITLE`. ---""" ) # Otherwise, grab the openrouter key, if not found elif model not in OPENAI_MODELS and not openrouter_key: print_markdown( """--- !!!**OpenRouter API key not found.** Please provide a key to proceed. --- """ ) api_key = getpass("OpenRouter API key: ") os.environ["OPENROUTER_API_KEY"] = api_key print_markdown( f""" --- **Tip**: To save this key for later, run `export OPENROUTER_API_KEY=<your key>` on mac/linux or `setx OPENROUTER_API_KEY <your key>` on windows. You can optionally set `OPENROUTER_APP_URL` and `OPENROUTER_APP_TITLE`, too. 
Note that your context window is set to {context_window}. To change this, run `context --context_window <context window>`. ---""" ) if model == "gpt-4-1106-preview": print_markdown( """!!!Welcome to Fleet Context! Generate and run code using the most up-to-date libraries. *Warning*: You are using gpt-4-turbo, which is not yet stable and is rate limited at 100 requests per day. Please use with caution. """ ) else: print_markdown( """!!!Welcome to Fleet Context! Generate and run code using the most up-to-date libraries. """ ) messages = [] while True: try: query = input("> ") query = query.strip() if not query: continue if query.lower() == "exit": rprint("Exiting. Goodbye!") break messages.append({"role": "user", "content": query}) rag_context = retrieve_context(query, k=k, filters=filters) prompts = construct_prompt( messages, rag_context, model=model, cite_sources=cite_sources, context_window=context_window, ) full_response = "" try: streamer = TextStream() if model in OPENAI_MODELS: for response in get_remote_chat_response(prompts, model=model): if response: full_response += response streamer.print_stream(full_response) else:
for response in get_other_chat_response(prompts, model=model):
8
2023-11-02 07:07:13+00:00
16k
ForceFledgling/proxyhub
proxyhub/api.py
[ { "identifier": "Checker", "path": "proxyhub/checker.py", "snippet": "class Checker:\n \"\"\"Proxy checker.\"\"\"\n\n def __init__(\n self,\n judges,\n max_tries=3,\n timeout=8,\n verify_ssl=False,\n strict=False,\n dnsbl=None,\n real_ext_ip=None,\n types=None,\n post=False,\n loop=None,\n ):\n Judge.clear()\n self._judges = get_judges(judges, timeout, verify_ssl)\n self._method = 'POST' if post else 'GET'\n self._max_tries = max_tries\n self._real_ext_ip = real_ext_ip\n self._strict = strict\n self._dnsbl = dnsbl or []\n self._types = types or {}\n self._loop = loop or asyncio.get_event_loop()\n self._resolver = Resolver(loop=self._loop)\n\n self._req_http_proto = not types or bool(\n ('HTTP', 'CONNECT:80', 'SOCKS4', 'SOCKS5') & types.keys()\n )\n self._req_https_proto = not types or bool(('HTTPS',) & types.keys())\n self._req_smtp_proto = not types or bool(('CONNECT:25',) & types.keys()) # noqa\n\n self._ngtrs = {proto for proto in types or NGTRS}\n\n async def check_judges(self):\n # TODO: need refactoring\n log.debug('Start check judges')\n stime = time.time()\n await asyncio.gather(\n *[j.check(real_ext_ip=self._real_ext_ip) for j in self._judges]\n )\n\n self._judges = [j for j in self._judges if j.is_working]\n log.debug(\n '%d judges added. Runtime: %.4f;' % (len(self._judges), time.time() - stime)\n )\n\n nojudges = []\n disable_protocols = []\n\n if len(Judge.available['HTTP']) == 0:\n nojudges.append('HTTP')\n disable_protocols.extend(['HTTP', 'CONNECT:80', 'SOCKS4', 'SOCKS5'])\n self._req_http_proto = False\n # for coroutines, which is already waiting\n Judge.ev['HTTP'].set()\n if len(Judge.available['HTTPS']) == 0:\n nojudges.append('HTTPS')\n disable_protocols.append('HTTPS')\n self._req_https_proto = False\n # for coroutines, which is already waiting\n Judge.ev['HTTPS'].set()\n if len(Judge.available['SMTP']) == 0:\n # nojudges.append('SMTP')\n disable_protocols.append('SMTP')\n self._req_smtp_proto = False\n # for coroutines, which is already waiting\n Judge.ev['SMTP'].set()\n\n for proto in disable_protocols:\n if proto in self._ngtrs:\n self._ngtrs.remove(proto)\n\n if nojudges:\n warnings.warn(\n 'Not found judges for the {nojudges} protocol.\\n'\n 'Checking proxy on protocols {disp} is disabled.'.format(\n nojudges=nojudges, disp=disable_protocols\n ),\n UserWarning,\n )\n if self._judges:\n log.debug('Loaded: %d proxy judges' % len(set(self._judges)))\n else:\n RuntimeError('Not found judges')\n\n def _types_passed(self, proxy):\n if not self._types:\n return True\n for proto, lvl in proxy.types.copy().items():\n req_levels = self._types.get(proto)\n if not req_levels or (lvl in req_levels):\n if not self._strict:\n return True\n else:\n if self._strict:\n del proxy.types[proto]\n if self._strict and proxy.types:\n return True\n proxy.log('Protocol or the level of anonymity differs from the requested')\n return False\n\n async def _in_DNSBL(self, host):\n _host = '.'.join(reversed(host.split('.'))) # reverse address\n tasks = []\n for domain in self._dnsbl:\n query = '.'.join([_host, domain])\n tasks.append(self._resolver.resolve(query, logging=False))\n responses = await asyncio.gather(*tasks, return_exceptions=True)\n if any([r for r in responses if not isinstance(r, ResolveError)]):\n return True\n return False\n\n async def check(self, proxy):\n if self._dnsbl:\n if await self._in_DNSBL(proxy.host):\n proxy.log('Found in DNSBL')\n return False\n\n if self._req_http_proto:\n await Judge.ev['HTTP'].wait()\n if self._req_https_proto:\n await 
Judge.ev['HTTPS'].wait()\n if self._req_smtp_proto:\n await Judge.ev['SMTP'].wait()\n\n if proxy.expected_types:\n ngtrs = proxy.expected_types & self._ngtrs\n else:\n ngtrs = self._ngtrs\n\n results = []\n for proto in ngtrs:\n if proto == 'CONNECT:25':\n result = await self._check_conn_25(proxy, proto)\n else:\n result = await self._check(proxy, proto)\n results.append(result)\n\n proxy.is_working = True if any(results) else False\n\n if proxy.is_working and self._types_passed(proxy):\n return True\n return False\n\n async def _check_conn_25(self, proxy, proto):\n judge = Judge.get_random(proto)\n proxy.log('Selected judge: %s' % judge)\n result = False\n for attempt in range(self._max_tries):\n try:\n proxy.ngtr = proto\n await proxy.connect()\n await proxy.ngtr.negotiate(host=judge.host, ip=judge.ip)\n except ProxyTimeoutError:\n continue\n except (\n ProxyConnError,\n ProxyRecvError,\n ProxySendError,\n ProxyEmptyRecvError,\n BadStatusError,\n BadResponseError,\n ):\n break\n else:\n proxy.types[proxy.ngtr.name] = None\n result = True\n break\n finally:\n proxy.close()\n return result\n\n async def _check(self, proxy, proto):\n judge = Judge.get_random(proto)\n proxy.log('Selected judge: %s' % judge)\n result = False\n for attempt in range(self._max_tries):\n try:\n proxy.ngtr = proto\n await proxy.connect()\n await proxy.ngtr.negotiate(host=judge.host, ip=judge.ip)\n headers, content, rv = await _send_test_request(\n self._method, proxy, judge\n )\n except ProxyTimeoutError:\n continue\n except (\n ProxyConnError,\n ProxyRecvError,\n ProxySendError,\n ProxyEmptyRecvError,\n BadStatusError,\n BadResponseError,\n ):\n break\n else:\n content = _decompress_content(headers, content)\n result = _check_test_response(proxy, headers, content, rv)\n if result:\n if proxy.ngtr.check_anon_lvl:\n lvl = _get_anonymity_lvl(\n self._real_ext_ip, proxy, judge, content\n )\n else:\n lvl = None\n proxy.types[proxy.ngtr.name] = lvl\n break\n finally:\n proxy.close()\n return result" }, { "identifier": "ResolveError", "path": "proxyhub/errors.py", "snippet": "class ResolveError(Exception):\n pass" }, { "identifier": "PROVIDERS", "path": "proxyhub/providers.py", "snippet": "PROVIDERS = [\n Provider(\n url='http://www.proxylists.net/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 49\n Provider(\n url='https://api.proxyscrape.com/?request=getproxies&proxytype=http',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # added by ZerGo0\n Provider(\n url='https://api.proxyscrape.com/?request=getproxies&proxytype=socks4',\n proto=('SOCKS4'),\n ), # added by ZerGo0\n Provider(\n url='https://api.proxyscrape.com/?request=getproxies&proxytype=socks5',\n proto=('SOCKS5'),\n ), # added by ZerGo0\n Provider(\n url='http://ipaddress.com/proxy-list/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 53\n Provider(\n url='https://www.sslproxies.org/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 100\n Provider(\n url='https://freshfreeproxylist.wordpress.com/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 50\n Provider(\n url='http://proxytime.ru/http',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 1400\n Provider(\n url='https://free-proxy-list.net/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 300\n Provider(\n url='https://us-proxy.org/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 200\n Provider(\n url='http://fineproxy.org/eng/fresh-proxies/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 
'CONNECT:25'),\n ), # 5500\n Provider(url='https://socks-proxy.net/', proto=('SOCKS4', 'SOCKS5')), # 80\n Provider(\n url='http://www.httptunnel.ge/ProxyListForFree.aspx',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 200\n Provider(\n url='http://cn-proxy.com/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 70\n Provider(\n url='https://hugeproxies.com/home/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 800\n Provider(\n url='http://proxy.rufey.ru/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 153\n Provider(\n url='https://geekelectronics.org/my-servisy/proxy',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 400\n Provider(\n url='http://pubproxy.com/api/proxy?limit=20&format=txt',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 20\n Proxy_list_org(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # noqa; 140\n Xseo_in(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # noqa; 240\n Spys_ru(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # noqa; 660\n Proxylistplus_com(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # noqa; 450\n Proxylist_me(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # noqa; 2872\n Foxtools_ru(\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'), max_conn=1\n ), # noqa; 500\n Gatherproxy_com(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # noqa; 3212\n Nntime_com(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # noqa; 1050\n Blogspot_com(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # noqa; 24800\n Gatherproxy_com_socks(proto=('SOCKS4', 'SOCKS5')), # noqa; 30\n Blogspot_com_socks(proto=('SOCKS4', 'SOCKS5')), # noqa; 1486\n Tools_rosinstrument_com(\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')\n ), # noqa; 4000\n Tools_rosinstrument_com_socks(proto=('SOCKS4', 'SOCKS5')), # noqa; 1800\n My_proxy_com(max_conn=2), # noqa; 1000\n Checkerproxy_net(), # noqa; 60000\n Aliveproxy_com(), # noqa; 210\n Freeproxylists_com(), # noqa; 1338\n Webanetlabs_net(), # noqa; 5000\n Maxiproxies_com(), # noqa; 430\n Proxylist_download(), # noqa; 35590\n # # Bad...\n # http://www.proxylist.ro/\n # Provider(url='http://proxydb.net/',\n # proto=('HTTP', 'CONNECT:80', 'HTTPS',\n # 'CONNECT:25', 'SOCKS4', 'SOCKS5')),\n # Provider(url='http://www.cybersyndrome.net/pla6.html',\n # proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # 1100\n # Provider(url='https://www.ip-adress.com/proxy-list',\n # proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # 57\n # Provider(url='https://www.marcosbl.com/lab/proxies/',\n # proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # 89\n # Provider(url='http://go4free.xyz/Free-Proxy/',\n # proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # 196\n # Provider(url='http://blackstarsecurity.com/proxy-list.txt'), # 7014\n # Provider(url='http://www.get-proxy.net/proxy-archives'), # 519\n # Proxyb_net(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # 857\n # Proxz_com(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n # max_conn=2), # 443\n # Proxynova_com(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # 818\n # _50kproxies_com(), # 822\n # Free_proxy_cz(), # 420\n]" }, { "identifier": "Provider", "path": "proxyhub/providers.py", "snippet": "class Provider:\n \"\"\"Proxy provider.\n\n Provider - a website that publish free public proxy lists.\n\n :param str url: Url of page where to find proxies\n :param tuple proto:\n (optional) List of the types (protocols) that may be supported\n by 
proxies returned by the provider. Then used as :attr:`Proxy.types`\n :param int max_conn:\n (optional) The maximum number of concurrent connections on the provider\n :param int max_tries:\n (optional) The maximum number of attempts to receive response\n :param int timeout:\n (optional) Timeout of a request in seconds\n \"\"\"\n\n _pattern = IPPortPatternGlobal\n\n def __init__(\n self, url=None, proto=(), max_conn=4, max_tries=3, timeout=20, loop=None\n ):\n if url:\n self.domain = urlparse(url).netloc\n self.url = url\n self.proto = proto\n self._max_tries = max_tries\n self._timeout = timeout\n self._session = None\n self._cookies = {}\n self._proxies = set()\n # concurrent connections on the current provider\n self._sem_provider = asyncio.Semaphore(max_conn)\n self._loop = loop or asyncio.get_event_loop()\n\n @property\n def proxies(self):\n \"\"\"Return all found proxies.\n\n :return:\n Set of tuples with proxy hosts, ports and types (protocols)\n that may be supported (from :attr:`.proto`).\n\n For example:\n {('192.168.0.1', '80', ('HTTP', 'HTTPS'), ...)}\n\n :rtype: set\n \"\"\"\n return self._proxies\n\n @proxies.setter\n def proxies(self, new):\n new = [(host, port, self.proto) for host, port in new if port]\n self._proxies.update(new)\n\n async def get_proxies(self):\n \"\"\"Receive proxies from the provider and return them.\n\n :return: :attr:`.proxies`\n \"\"\"\n log.debug('Try to get proxies from %s' % self.domain)\n\n async with aiohttp.ClientSession(\n headers=get_headers(), cookies=self._cookies, loop=self._loop\n ) as self._session:\n await self._pipe()\n\n log.debug(\n '%d proxies received from %s: %s'\n % (len(self.proxies), self.domain, self.proxies)\n )\n return self.proxies\n\n async def _pipe(self):\n await self._find_on_page(self.url)\n\n async def _find_on_pages(self, urls):\n if not urls:\n return\n tasks = []\n if not isinstance(urls[0], dict):\n urls = set(urls)\n for url in urls:\n if isinstance(url, dict):\n tasks.append(self._find_on_page(**url))\n else:\n tasks.append(self._find_on_page(url))\n await asyncio.gather(*tasks)\n\n async def _find_on_page(self, url, data=None, headers=None, method='GET'):\n page = await self.get(url, data=data, headers=headers, method=method)\n oldcount = len(self.proxies)\n try:\n received = self.find_proxies(page)\n except Exception as e:\n received = []\n log.error(\n 'Error when executing find_proxies.'\n 'Domain: %s; Error: %r' % (self.domain, e)\n )\n self.proxies = received\n added = len(self.proxies) - oldcount\n log.debug(\n '%d(%d) proxies added(received) from %s' % (added, len(received), url)\n )\n\n async def get(self, url, data=None, headers=None, method='GET'):\n for _ in range(self._max_tries):\n page = await self._get(url, data=data, headers=headers, method=method)\n if page:\n break\n return page\n\n async def _get(self, url, data=None, headers=None, method='GET'):\n page = ''\n try:\n timeout = aiohttp.ClientTimeout(total=self._timeout)\n async with self._sem_provider, self._session.request(\n method, url, data=data, headers=headers, timeout=timeout\n ) as resp:\n page = await resp.text()\n if resp.status != 200:\n log.debug(\n 'url: %s\\nheaders: %s\\ncookies: %s\\npage:\\n%s'\n % (url, resp.headers, resp.cookies, page)\n )\n raise BadStatusError('Status: %s' % resp.status)\n except (\n UnicodeDecodeError,\n BadStatusError,\n asyncio.TimeoutError,\n aiohttp.ClientOSError,\n aiohttp.ClientResponseError,\n aiohttp.ServerDisconnectedError,\n ) as e:\n page = ''\n log.debug('%s is failed. 
Error: %r;' % (url, e))\n return page\n\n def find_proxies(self, page):\n return self._find_proxies(page)\n\n def _find_proxies(self, page):\n proxies = self._pattern.findall(page)\n return proxies" }, { "identifier": "Proxy", "path": "proxyhub/proxy.py", "snippet": "class Proxy:\n \"\"\"Proxy.\n\n :param str host: IP address of the proxy\n :param int port: Port of the proxy\n :param tuple types:\n (optional) List of types (protocols) which may be supported\n by the proxy and which can be checked to work with the proxy\n :param int timeout:\n (optional) Timeout of a connection and receive a response in seconds\n :param bool verify_ssl:\n (optional) Flag indicating whether to check the SSL certificates.\n Set to True to check ssl certifications\n\n :raises ValueError: If the host not is IP address, or if the port > 65535\n \"\"\"\n\n @classmethod\n async def create(cls, host, *args, **kwargs):\n \"\"\"Asynchronously create a :class:`Proxy` object.\n\n :param str host: A passed host can be a domain or IP address.\n If the host is a domain, try to resolve it\n :param str *args:\n (optional) Positional arguments that :class:`Proxy` takes\n :param str **kwargs:\n (optional) Keyword arguments that :class:`Proxy` takes\n\n :return: :class:`Proxy` object\n :rtype: proxyhub.Proxy\n\n :raises ResolveError: If could not resolve the host\n :raises ValueError: If the port > 65535\n \"\"\" # noqa: W605\n loop = kwargs.pop('loop', None)\n resolver = kwargs.pop('resolver', Resolver(loop=loop))\n try:\n _host = await resolver.resolve(host)\n self = cls(_host, *args, **kwargs)\n except (ResolveError, ValueError) as e:\n log.error('%s:%s: Error at creating: %s' % (host, args[0], e))\n raise\n return self\n\n def __init__(self, host=None, port=None, types=(), timeout=8, verify_ssl=False):\n self.host = host\n if not Resolver.host_is_ip(self.host):\n raise ValueError(\n 'The host of proxy should be the IP address. '\n 'Try Proxy.create() if the host is a domain'\n )\n\n self.port = int(port)\n if self.port > 65535:\n raise ValueError('The port of proxy cannot be greater than 65535')\n\n self.expected_types = set(types) & {\n 'HTTP',\n 'HTTPS',\n 'CONNECT:80',\n 'CONNECT:25',\n 'SOCKS4',\n 'SOCKS5',\n }\n self._timeout = timeout\n self._ssl_context = True if verify_ssl else _ssl._create_unverified_context()\n self._types = {}\n self._is_working = False\n self.stat = {'requests': 0, 'errors': Counter()}\n self._ngtr = None\n self._geo = Resolver.get_ip_info(self.host)\n self._log = []\n self._runtimes = []\n self._schemes = ()\n self._closed = True\n self._reader = {'conn': None, 'ssl': None}\n self._writer = {'conn': None, 'ssl': None}\n\n def __repr__(self):\n \"\"\"Class representation\n e.g. 
<Proxy US 1.12 [HTTP: Anonymous, HTTPS] 10.0.0.1:8080>\n \"\"\"\n tpinfo = []\n order = lambda tp_lvl: (len(tp_lvl[0]), tp_lvl[0][-1]) # noqa: 731\n for tp, lvl in sorted(self.types.items(), key=order):\n s = '{tp}: {lvl}' if lvl else '{tp}'\n s = s.format(tp=tp, lvl=lvl)\n tpinfo.append(s)\n tpinfo = ', '.join(tpinfo)\n return '<Proxy {code} {avg:.2f}s [{types}] {host}:{port}>'.format(\n code=self._geo.code,\n types=tpinfo,\n host=self.host,\n port=self.port,\n avg=self.avg_resp_time,\n )\n\n @property\n def types(self):\n \"\"\"Types (protocols) supported by the proxy.\n\n | Where key is type, value is level of anonymity\n (only for HTTP, for other types level always is None).\n | Available types: HTTP, HTTPS, SOCKS4, SOCKS5, CONNECT:80, CONNECT:25\n | Available levels: Transparent, Anonymous, High.\n\n :rtype: dict\n \"\"\"\n return self._types\n\n @property\n def is_working(self):\n \"\"\"True if the proxy is working, False otherwise.\n\n :rtype: bool\n \"\"\"\n return self._is_working\n\n @is_working.setter\n def is_working(self, val):\n self._is_working = val\n\n @property\n def writer(self):\n return self._writer.get('ssl') or self._writer.get('conn')\n\n @property\n def reader(self):\n return self._reader.get('ssl') or self._reader.get('conn')\n\n @property\n def priority(self):\n return (self.error_rate, self.avg_resp_time)\n\n @property\n def error_rate(self):\n \"\"\"Error rate: from 0 to 1.\n\n For example: 0.7 = 70% requests ends with error.\n\n :rtype: float\n\n .. versionadded:: 0.2.0\n \"\"\"\n if not self.stat['requests']:\n return 0\n return round(sum(self.stat['errors'].values()) / self.stat['requests'], 2)\n\n @property\n def schemes(self):\n \"\"\"Return supported schemes.\"\"\"\n if not self._schemes:\n _schemes = []\n if self.types.keys() & _HTTP_PROTOS:\n _schemes.append('HTTP')\n if self.types.keys() & _HTTPS_PROTOS:\n _schemes.append('HTTPS')\n self._schemes = tuple(_schemes)\n return self._schemes\n\n @property\n def avg_resp_time(self):\n \"\"\"The average connection/response time.\n\n :rtype: float\n \"\"\"\n if not self._runtimes:\n return 0\n return round(sum(self._runtimes) / len(self._runtimes), 2)\n\n @property\n def avgRespTime(self):\n \"\"\"\n .. deprecated:: 2.0\n Use :attr:`avg_resp_time` instead.\n \"\"\"\n warnings.warn(\n '`avgRespTime` property is deprecated, ' 'use `avg_resp_time` instead.',\n DeprecationWarning,\n )\n return self.avg_resp_time\n\n @property\n def geo(self):\n \"\"\"Geo information about IP address of the proxy.\n\n :return:\n Named tuple with fields:\n * ``code`` - ISO country code\n * ``name`` - Full name of country\n * ``region_code`` - ISO region code\n * ``region_name`` - Full name of region\n * ``city_name`` - Full name of city\n :rtype: collections.namedtuple\n\n .. 
versionchanged:: 0.2.0\n In previous versions return a dictionary, now named tuple.\n \"\"\"\n return self._geo\n\n @property\n def ngtr(self):\n return self._ngtr\n\n @ngtr.setter\n def ngtr(self, proto):\n self._ngtr = NGTRS[proto](self)\n\n def as_json(self):\n \"\"\"Return the proxy's properties in JSON format.\n\n :rtype: dict\n \"\"\"\n info = {\n 'host': self.host,\n 'port': self.port,\n 'geo': {\n 'country': {'code': self._geo.code, 'name': self._geo.name},\n 'region': {\n 'code': self._geo.region_code,\n 'name': self._geo.region_name,\n },\n 'city': self._geo.city_name,\n },\n 'types': [],\n 'avg_resp_time': self.avg_resp_time,\n 'error_rate': self.error_rate,\n }\n\n order = lambda tp_lvl: (len(tp_lvl[0]), tp_lvl[0][-1]) # noqa: 731\n for tp, lvl in sorted(self.types.items(), key=order):\n info['types'].append({'type': tp, 'level': lvl or ''})\n return info\n\n def as_text(self):\n \"\"\"\n Return proxy as host:port\n\n :rtype: str\n \"\"\"\n return \"{}:{}\\n\".format(self.host, self.port)\n\n def log(self, msg, stime=0, err=None):\n ngtr = self.ngtr.name if self.ngtr else 'INFO'\n runtime = time.time() - stime if stime else 0\n log.debug(\n '{h}:{p} [{n}]: {msg}; Runtime: {rt:.2f}'.format(\n h=self.host, p=self.port, n=ngtr, msg=msg, rt=runtime\n )\n )\n trunc = '...' if len(msg) > 58 else ''\n msg = '{msg:.60s}{trunc}'.format(msg=msg, trunc=trunc)\n self._log.append((ngtr, msg, runtime))\n if err:\n self.stat['errors'][err.errmsg] += 1\n if runtime and 'timeout' not in msg:\n self._runtimes.append(runtime)\n\n def get_log(self):\n \"\"\"Proxy log.\n\n :return: The proxy log in format: (negotaitor, msg, runtime)\n :rtype: tuple\n\n .. versionadded:: 0.2.0\n \"\"\"\n return self._log\n\n async def connect(self, ssl=False):\n err = None\n msg = '%s' % 'SSL: ' if ssl else ''\n stime = time.time()\n self.log('%sInitial connection' % msg)\n try:\n if ssl:\n _type = 'ssl'\n sock = self._writer['conn'].get_extra_info('socket')\n params = {\n 'ssl': self._ssl_context,\n 'sock': sock,\n 'server_hostname': self.host,\n }\n else:\n _type = 'conn'\n params = {'host': self.host, 'port': self.port}\n self._reader[_type], self._writer[_type] = await asyncio.wait_for(\n asyncio.open_connection(**params), timeout=self._timeout\n )\n except asyncio.TimeoutError:\n msg += 'Connection: timeout'\n err = ProxyTimeoutError(msg)\n raise err\n except (ConnectionRefusedError, OSError, _ssl.SSLError):\n msg += 'Connection: failed'\n err = ProxyConnError(msg)\n raise err\n # except asyncio.CancelledError:\n # log.debug('Cancelled in proxy.connect()')\n # raise ProxyConnError()\n else:\n msg += 'Connection: success'\n self._closed = False\n finally:\n self.stat['requests'] += 1\n self.log(msg, stime, err=err)\n\n def close(self):\n if self._closed:\n return\n self._closed = True\n if self.writer:\n # try:\n self.writer.close()\n # except RuntimeError:\n # print('Try proxy.close() when loop is closed:',\n # asyncio.get_event_loop()._closed)\n self._reader = {'conn': None, 'ssl': None}\n self._writer = {'conn': None, 'ssl': None}\n self.log('Connection: closed')\n self._ngtr = None\n\n async def send(self, req):\n msg, err = '', None\n _req = req.encode() if not isinstance(req, bytes) else req\n try:\n self.writer.write(_req)\n await self.writer.drain()\n except ConnectionResetError:\n msg = '; Sending: failed'\n err = ProxySendError(msg)\n raise err\n finally:\n self.log('Request: %s%s' % (req, msg), err=err)\n\n async def recv(self, length=0, head_only=False):\n resp, msg, err = b'', '', None\n stime = 
time.time()\n try:\n resp = await asyncio.wait_for(\n self._recv(length, head_only), timeout=self._timeout\n )\n except asyncio.TimeoutError:\n msg = 'Received: timeout'\n err = ProxyTimeoutError(msg)\n raise err\n except (ConnectionResetError, OSError):\n msg = 'Received: failed' # (connection is reset by the peer)\n err = ProxyRecvError(msg)\n raise err\n else:\n msg = 'Received: %s bytes' % len(resp)\n if not resp:\n err = ProxyEmptyRecvError(msg)\n raise err\n finally:\n if resp:\n msg += ': %s' % resp[:12]\n self.log(msg, stime, err=err)\n return resp\n\n async def _recv(self, length=0, head_only=False):\n resp = b''\n if length:\n try:\n resp = await self.reader.readexactly(length)\n except asyncio.IncompleteReadError as e:\n resp = e.partial\n else:\n body_size, body_recv, chunked = 0, 0, None\n while not self.reader.at_eof():\n line = await self.reader.readline()\n resp += line\n if body_size:\n body_recv += len(line)\n if body_recv >= body_size:\n break\n elif chunked and line == b'0\\r\\n':\n break\n elif not body_size and line == b'\\r\\n':\n if head_only:\n break\n headers = parse_headers(resp)\n body_size = int(headers.get('Content-Length', 0))\n if not body_size:\n chunked = headers.get('Transfer-Encoding') == 'chunked'\n return resp" }, { "identifier": "Resolver", "path": "proxyhub/resolver.py", "snippet": "class Resolver:\n \"\"\"Async host resolver based on aiodns.\"\"\"\n\n _cached_hosts = {}\n _ip_hosts = [\n 'https://wtfismyip.com/text',\n 'http://api.ipify.org/',\n 'http://ipinfo.io/ip',\n 'http://ipv4.icanhazip.com/',\n 'http://myexternalip.com/raw',\n 'http://ipinfo.io/ip',\n 'http://ifconfig.io/ip',\n ]\n # the list of resolvers will point a copy of original one\n _temp_host = []\n\n def __init__(self, timeout=5, loop=None):\n self._timeout = timeout\n self._loop = loop or asyncio.get_event_loop()\n self._resolver = aiodns.DNSResolver(loop=self._loop)\n\n @staticmethod\n def host_is_ip(host):\n \"\"\"Check a host is IP address.\"\"\"\n # TODO: add IPv6 support\n try:\n host = '.'.join(f'{int(n)}' for n in host.split('.'))\n ipaddress.IPv4Address(host)\n except (ipaddress.AddressValueError, ValueError):\n return False\n else:\n return True\n\n @staticmethod\n def get_ip_info(ip):\n \"\"\"Return geo information about IP address.\n\n `code` - ISO country code\n `name` - Full name of country\n `region_code` - ISO region code\n `region_name` - Full name of region\n `city_name` - Full name of city\n \"\"\"\n # from pprint import pprint\n try:\n ipInfo = _mmdb_reader.get(ip) or {}\n except (maxminddb.errors.InvalidDatabaseError, ValueError):\n ipInfo = {}\n\n code, name = '--', 'Unknown'\n city_name, region_code, region_name = ('Unknown',) * 3\n if 'country' in ipInfo:\n code = ipInfo['country']['iso_code']\n name = ipInfo['country']['names']['en']\n elif 'continent' in ipInfo:\n code = ipInfo['continent']['code']\n name = ipInfo['continent']['names']['en']\n if 'city' in ipInfo:\n city_name = ipInfo['city']['names']['en']\n if 'subdivisions' in ipInfo:\n region_code = ipInfo['subdivisions'][0]['iso_code']\n region_name = ipInfo['subdivisions'][0]['names']['en']\n return GeoData(code, name, region_code, region_name, city_name)\n\n def _pop_random_ip_host(self):\n host = random.choice(self._temp_host)\n self._temp_host.remove(host)\n return host\n\n async def get_real_ext_ip(self):\n \"\"\"Return real external IP address.\"\"\"\n # make a copy of original one to temp one\n # so original one will stay no change\n self._temp_host = self._ip_hosts.copy()\n while 
self._temp_host:\n try:\n timeout = aiohttp.ClientTimeout(total=self._timeout)\n async with aiohttp.ClientSession(\n timeout=timeout, loop=self._loop\n ) as session, session.get(self._pop_random_ip_host()) as resp:\n ip = await resp.text()\n except asyncio.TimeoutError:\n pass\n else:\n ip = ip.strip()\n if self.host_is_ip(ip):\n log.debug('Real external IP: %s', ip)\n break\n else:\n raise RuntimeError('Could not get the external IP')\n return ip\n\n async def resolve(self, host, port=80, family=None, qtype='A', logging=True):\n \"\"\"Return resolving IP address(es) from host name.\"\"\"\n if self.host_is_ip(host):\n return host\n\n _host = self._cached_hosts.get(host)\n if _host:\n return _host\n\n resp = await self._resolve(host, qtype)\n\n if resp:\n hosts = [\n {\n 'hostname': host,\n 'host': r.host,\n 'port': port,\n 'family': family,\n 'proto': socket.IPPROTO_IP,\n 'flags': socket.AI_NUMERICHOST,\n }\n for r in resp\n ]\n if family:\n self._cached_hosts[host] = hosts\n else:\n self._cached_hosts[host] = hosts[0]['host']\n if logging:\n log.debug('%s: Host resolved: %s' % (host, self._cached_hosts[host]))\n else:\n if logging:\n log.warning('%s: Could not resolve host' % host)\n return self._cached_hosts.get(host)\n\n async def _resolve(self, host, qtype):\n try:\n resp = await asyncio.wait_for(\n self._resolver.query(host, qtype), timeout=self._timeout\n )\n except (aiodns.error.DNSError, asyncio.TimeoutError):\n raise ResolveError\n else:\n return resp" }, { "identifier": "Server", "path": "proxyhub/server.py", "snippet": "class Server:\n \"\"\"Server distributes incoming requests to a pool of found proxies.\"\"\"\n\n def __init__(\n self,\n host,\n port,\n proxies,\n timeout=8,\n max_tries=3,\n min_queue=5,\n min_req_proxy=5,\n max_error_rate=0.5,\n max_resp_time=8,\n prefer_connect=False,\n http_allowed_codes=None,\n backlog=100,\n loop=None,\n **kwargs,\n ):\n self.host = host\n self.port = int(port)\n self._loop = loop or asyncio.get_event_loop()\n self._timeout = timeout\n self._max_tries = max_tries\n self._backlog = backlog\n self._prefer_connect = prefer_connect\n\n self._server = None\n self._connections = {}\n self._proxy_pool = ProxyPool(\n proxies, min_req_proxy, max_error_rate, max_resp_time, min_queue\n )\n self._resolver = Resolver(loop=self._loop)\n self._http_allowed_codes = http_allowed_codes or []\n\n def start(self):\n\n srv = asyncio.start_server(\n self._accept,\n host=self.host,\n port=self.port,\n backlog=self._backlog,\n loop=self._loop,\n )\n self._server = self._loop.run_until_complete(srv)\n\n log.info(\n 'Listening established on {0}'.format(self._server.sockets[0].getsockname())\n )\n\n def stop(self):\n if not self._server:\n return\n for conn in self._connections:\n if not conn.done():\n conn.cancel()\n self._server.close()\n if not self._loop.is_running():\n self._loop.run_until_complete(self._server.wait_closed())\n # Time to close the running futures in self._connections\n self._loop.run_until_complete(asyncio.sleep(0.5))\n self._server = None\n self._loop.stop()\n log.info('Server is stopped')\n\n def _accept(self, client_reader, client_writer):\n def _on_completion(f):\n reader, writer = self._connections.pop(f)\n writer.close()\n log.debug('client: %d; closed' % id(client_reader))\n try:\n exc = f.exception()\n except asyncio.CancelledError:\n log.debug('CancelledError in server._handle:_on_completion')\n exc = None\n if exc:\n if isinstance(exc, NoProxyError):\n self.stop()\n else:\n raise exc\n\n f = 
asyncio.ensure_future(self._handle(client_reader, client_writer))\n f.add_done_callback(_on_completion)\n self._connections[f] = (client_reader, client_writer)\n\n async def _handle(self, client_reader, client_writer):\n log.debug(\n 'Accepted connection from %s' % (client_writer.get_extra_info('peername'),)\n )\n\n request, headers = await self._parse_request(client_reader)\n scheme = self._identify_scheme(headers)\n client = id(client_reader)\n log.debug(\n 'client: %d; request: %s; headers: %s; scheme: %s'\n % (client, request, headers, scheme)\n )\n\n # API for controlling proxyhub2\n if headers['Host'] == 'proxycontrol':\n _api, _operation, _params = headers['Path'].split('/', 5)[3:]\n if _api == 'api':\n if _operation == 'remove':\n proxy_host, proxy_port = _params.split(':', 1)\n self._proxy_pool.remove(proxy_host, int(proxy_port))\n log.debug(\n 'Remove Proxy: client: %d; request: %s; headers: %s; scheme: %s; proxy_host: %s; proxy_port: %s'\n % (client, request, headers, scheme, proxy_host, proxy_port)\n )\n client_writer.write(b'HTTP/1.1 204 No Content\\r\\n\\r\\n')\n await client_writer.drain()\n return\n elif _operation == 'history':\n query_type, url = _params.split(':', 1)\n if query_type == 'url':\n previous_proxy = history.get(\n f\"{client_reader._transport.get_extra_info('peername')[0]}-{url}\"\n )\n if previous_proxy is None:\n client_writer.write(b'HTTP/1.1 204 No Content\\r\\n\\r\\n')\n await client_writer.drain()\n return\n else:\n previous_proxy_bytestring = (\n '{\"proxy\": \"%s\"}' % previous_proxy\n ).encode()\n client_writer.write(b'HTTP/1.1 200 OK\\r\\n')\n client_writer.write(b'Content-Type: application/json\\r\\n')\n client_writer.write(\n f\"Content-Length: {str(len(previous_proxy_bytestring) + 2).encode()}\\r\\n\"\n )\n client_writer.write(b'Access-Control-Allow-Origin: *\\r\\n')\n client_writer.write(\n b'Access-Control-Allow-Credentials: true\\r\\n\\r\\n'\n )\n\n client_writer.write(previous_proxy_bytestring + b'\\r\\n')\n await client_writer.drain()\n return\n\n for attempt in range(self._max_tries):\n stime, err = 0, None\n proxy = await self._proxy_pool.get(scheme)\n proto = self._choice_proto(proxy, scheme)\n log.debug(\n 'client: %d; attempt: %d; proxy: %s; proto: %s'\n % (client, attempt, proxy, proto)\n )\n\n try:\n await proxy.connect()\n\n if proto in ('CONNECT:80', 'SOCKS4', 'SOCKS5'):\n host = headers.get('Host')\n port = headers.get('Port', 80)\n try:\n ip = await self._resolver.resolve(host)\n except ResolveError:\n return\n proxy.ngtr = proto\n await proxy.ngtr.negotiate(host=host, port=port, ip=ip)\n if scheme == 'HTTPS' and proto in ('SOCKS4', 'SOCKS5'):\n client_writer.write(CONNECTED)\n await client_writer.drain()\n else: # HTTP\n await proxy.send(request)\n else: # proto: HTTP & HTTPS\n await proxy.send(request)\n\n history[\n f\"{client_reader._transport.get_extra_info('peername')[0]}-{headers['Path']}\"\n ] = (proxy.host + ':' + str(proxy.port))\n inject_resp_header = {\n 'headers': {'X-Proxy-Info': proxy.host + ':' + str(proxy.port)}\n }\n\n stime = time.time()\n stream = [\n asyncio.ensure_future(\n self._stream(reader=client_reader, writer=proxy.writer)\n ),\n asyncio.ensure_future(\n self._stream(\n reader=proxy.reader,\n writer=client_writer,\n scheme=scheme,\n inject=inject_resp_header,\n )\n ),\n ]\n await asyncio.gather(*stream, loop=self._loop)\n except asyncio.CancelledError:\n log.debug('Cancelled in server._handle')\n break\n except (\n ProxyTimeoutError,\n ProxyConnError,\n ProxyRecvError,\n ProxySendError,\n 
ProxyEmptyRecvError,\n BadStatusError,\n BadResponseError,\n ) as e:\n log.debug('client: %d; error: %r' % (client, e))\n continue\n except ErrorOnStream as e:\n log.debug(\n 'client: %d; error: %r; EOF: %s'\n % (client, e, client_reader.at_eof())\n )\n for task in stream:\n if not task.done():\n task.cancel()\n if client_reader.at_eof() and 'Timeout' in repr(e):\n # Proxy may not be able to receive EOF and weel be raised a\n # TimeoutError, but all the data has already successfully\n # returned, so do not consider this error of proxy\n break\n err = e\n if scheme == 'HTTPS': # SSL Handshake probably failed\n break\n else:\n break\n finally:\n proxy.log(request.decode(), stime, err=err)\n proxy.close()\n self._proxy_pool.put(proxy)\n\n async def _parse_request(self, reader, length=65536):\n request = await reader.read(length)\n headers = parse_headers(request)\n if headers['Method'] == 'POST' and request.endswith(b'\\r\\n\\r\\n'):\n # For aiohttp. POST data returns on second reading\n request += await reader.read(length)\n return request, headers\n\n def _identify_scheme(self, headers):\n if headers['Method'] == 'CONNECT':\n return 'HTTPS'\n else:\n return 'HTTP'\n\n def _choice_proto(self, proxy, scheme):\n if scheme == 'HTTP':\n if self._prefer_connect and ('CONNECT:80' in proxy.types):\n proto = 'CONNECT:80'\n else:\n relevant = {\n 'HTTP',\n 'CONNECT:80',\n 'SOCKS4',\n 'SOCKS5',\n } & proxy.types.keys()\n proto = relevant.pop()\n else: # HTTPS\n relevant = {'HTTPS', 'SOCKS4', 'SOCKS5'} & proxy.types.keys()\n proto = relevant.pop()\n return proto\n\n async def _stream(self, reader, writer, length=65536, scheme=None, inject=None):\n checked = False\n\n try:\n while not reader.at_eof():\n data = await asyncio.wait_for(reader.read(length), self._timeout)\n if not data:\n writer.close()\n break\n elif scheme and not checked:\n self._check_response(data, scheme)\n\n if inject.get('headers') is not None and len(inject['headers']) > 0:\n data = self._inject_headers(data, scheme, inject['headers'])\n\n checked = True\n\n writer.write(data)\n await writer.drain()\n\n except (\n asyncio.TimeoutError,\n ConnectionResetError,\n OSError,\n ProxyRecvError,\n BadStatusError,\n BadResponseError,\n ) as e:\n raise ErrorOnStream(e)\n\n def _check_response(self, data, scheme):\n if scheme == 'HTTP' and self._http_allowed_codes:\n line = data.split(b'\\r\\n', 1)[0].decode()\n try:\n header = parse_status_line(line)\n except BadStatusLine:\n raise BadResponseError\n if header['Status'] not in self._http_allowed_codes:\n raise BadStatusError(\n '%r not in %r' % (header['Status'], self._http_allowed_codes)\n )\n\n def _inject_headers(self, data, scheme, headers):\n custom_lines = []\n\n if scheme == 'HTTP' or scheme == 'HTTPS':\n status_line, rest_lines = data.split(b'\\r\\n', 1)\n custom_lines.append(status_line)\n\n for k, v in headers.items():\n custom_lines.append(('%s: %s' % (k, v)).encode())\n\n custom_lines.append(rest_lines)\n data = b'\\r\\n'.join(custom_lines)\n\n return data" }, { "identifier": "IPPortPatternLine", "path": "proxyhub/utils.py", "snippet": "BASE_DIR = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\nDATA_DIR = os.path.join(BASE_DIR, 'data')\ndef get_headers(rv=False):\ndef get_all_ip(page):\ndef get_status_code(resp, start=9, stop=12):\ndef parse_status_line(line):\ndef parse_headers(headers):\ndef update_geoip_db():" } ]
import asyncio import io import signal import warnings from collections import Counter, defaultdict from functools import partial from pprint import pprint from .checker import Checker from .errors import ResolveError from .providers import PROVIDERS, Provider from .proxy import Proxy from .resolver import Resolver from .server import Server from .utils import IPPortPatternLine, log
13,671
verify_ssl=False, loop=None, stop_broker_on_sigint=True, **kwargs, ): self._loop = loop or asyncio.get_event_loop_policy().get_event_loop() self._proxies = queue or asyncio.Queue() self._resolver = Resolver(loop=self._loop) self._timeout = timeout self._verify_ssl = verify_ssl self.unique_proxies = {} self._all_tasks = [] self._checker = None self._server = None self._limit = 0 # not limited self._countries = None max_concurrent_conn = kwargs.get('max_concurrent_conn') if max_concurrent_conn: warnings.warn( '`max_concurrent_conn` is deprecated, use `max_conn` instead', DeprecationWarning, ) if isinstance(max_concurrent_conn, asyncio.Semaphore): max_conn = max_concurrent_conn._value else: max_conn = max_concurrent_conn attempts_conn = kwargs.get('attempts_conn') if attempts_conn: warnings.warn( '`attempts_conn` is deprecated, use `max_tries` instead', DeprecationWarning, ) max_tries = attempts_conn # The maximum number of concurrent checking proxies self._on_check = asyncio.Queue(maxsize=max_conn) self._max_tries = max_tries self._judges = judges self._providers = [ p if isinstance(p, Provider) else Provider(p) for p in (providers or PROVIDERS) ] if stop_broker_on_sigint: try: self._loop.add_signal_handler(signal.SIGINT, self.stop) # add_signal_handler() is not implemented on Win # https://docs.python.org/3.5/library/asyncio-eventloops.html#windows except NotImplementedError: pass async def grab(self, *, countries=None, limit=0): """Gather proxies from the providers without checking. :param list countries: (optional) List of ISO country codes where should be located proxies :param int limit: (optional) The maximum number of proxies :ref:`Example of usage <proxyhub-examples-grab>`. """ self._countries = countries self._limit = limit task = asyncio.ensure_future(self._grab(check=False)) self._all_tasks.append(task) async def find( self, *, types=None, data=None, countries=None, post=False, strict=False, dnsbl=None, limit=0, **kwargs, ): """Gather and check proxies from providers or from a passed data. :ref:`Example of usage <proxyhub-examples-find>`. :param list types: Types (protocols) that need to be check on support by proxy. Supported: HTTP, HTTPS, SOCKS4, SOCKS5, CONNECT:80, CONNECT:25 And levels of anonymity (HTTP only): Transparent, Anonymous, High :param data: (optional) String or list with proxies. Also can be a file-like object supports `read()` method. Used instead of providers :param list countries: (optional) List of ISO country codes where should be located proxies :param bool post: (optional) Flag indicating use POST instead of GET for requests when checking proxies :param bool strict: (optional) Flag indicating that anonymity levels of types (protocols) supported by a proxy must be equal to the requested types and levels of anonymity. By default, strict mode is off and for a successful check is enough to satisfy any one of the requested types :param list dnsbl: (optional) Spam databases for proxy checking. `Wiki <https://en.wikipedia.org/wiki/DNSBL>`_ :param int limit: (optional) The maximum number of proxies :raises ValueError: If :attr:`types` not given. .. versionchanged:: 0.2.0 Added: :attr:`post`, :attr:`strict`, :attr:`dnsbl`. Changed: :attr:`types` is required. """ ip = await self._resolver.get_real_ext_ip() types = _update_types(types) if not types: raise ValueError('`types` is required')
# Pause between grabbing cycles; in seconds. GRAB_PAUSE = 180 # The maximum number of providers that are parsed concurrently MAX_CONCURRENT_PROVIDERS = 3 class Broker: """The Broker. | One broker to rule them all, one broker to find them, | One broker to bring them all and in the darkness bind them. :param asyncio.Queue queue: (optional) Queue of found/checked proxies :param int timeout: (optional) Timeout of a request in seconds :param int max_conn: (optional) The maximum number of concurrent checks of proxies :param int max_tries: (optional) The maximum number of attempts to check a proxy :param list judges: (optional) Urls of pages that show HTTP headers and IP address. Or :class:`~proxyhub.judge.Judge` objects :param list providers: (optional) Urls of pages where to find proxies. Or :class:`~proxyhub.providers.Provider` objects :param bool verify_ssl: (optional) Flag indicating whether to check the SSL certificates. Set to True to check ssl certifications :param loop: (optional) asyncio compatible event loop :param stop_broker_on_sigint: (optional) whether set SIGINT signal on broker object. Useful for a thread other than main thread. .. deprecated:: 0.2.0 Use :attr:`max_conn` and :attr:`max_tries` instead of :attr:`max_concurrent_conn` and :attr:`attempts_conn`. """ def __init__( self, queue=None, timeout=8, max_conn=200, max_tries=3, judges=None, providers=None, verify_ssl=False, loop=None, stop_broker_on_sigint=True, **kwargs, ): self._loop = loop or asyncio.get_event_loop_policy().get_event_loop() self._proxies = queue or asyncio.Queue() self._resolver = Resolver(loop=self._loop) self._timeout = timeout self._verify_ssl = verify_ssl self.unique_proxies = {} self._all_tasks = [] self._checker = None self._server = None self._limit = 0 # not limited self._countries = None max_concurrent_conn = kwargs.get('max_concurrent_conn') if max_concurrent_conn: warnings.warn( '`max_concurrent_conn` is deprecated, use `max_conn` instead', DeprecationWarning, ) if isinstance(max_concurrent_conn, asyncio.Semaphore): max_conn = max_concurrent_conn._value else: max_conn = max_concurrent_conn attempts_conn = kwargs.get('attempts_conn') if attempts_conn: warnings.warn( '`attempts_conn` is deprecated, use `max_tries` instead', DeprecationWarning, ) max_tries = attempts_conn # The maximum number of concurrent checking proxies self._on_check = asyncio.Queue(maxsize=max_conn) self._max_tries = max_tries self._judges = judges self._providers = [ p if isinstance(p, Provider) else Provider(p) for p in (providers or PROVIDERS) ] if stop_broker_on_sigint: try: self._loop.add_signal_handler(signal.SIGINT, self.stop) # add_signal_handler() is not implemented on Win # https://docs.python.org/3.5/library/asyncio-eventloops.html#windows except NotImplementedError: pass async def grab(self, *, countries=None, limit=0): """Gather proxies from the providers without checking. :param list countries: (optional) List of ISO country codes where should be located proxies :param int limit: (optional) The maximum number of proxies :ref:`Example of usage <proxyhub-examples-grab>`. """ self._countries = countries self._limit = limit task = asyncio.ensure_future(self._grab(check=False)) self._all_tasks.append(task) async def find( self, *, types=None, data=None, countries=None, post=False, strict=False, dnsbl=None, limit=0, **kwargs, ): """Gather and check proxies from providers or from a passed data. :ref:`Example of usage <proxyhub-examples-find>`. :param list types: Types (protocols) that need to be check on support by proxy. 
Supported: HTTP, HTTPS, SOCKS4, SOCKS5, CONNECT:80, CONNECT:25 And levels of anonymity (HTTP only): Transparent, Anonymous, High :param data: (optional) String or list with proxies. Also can be a file-like object supports `read()` method. Used instead of providers :param list countries: (optional) List of ISO country codes where should be located proxies :param bool post: (optional) Flag indicating use POST instead of GET for requests when checking proxies :param bool strict: (optional) Flag indicating that anonymity levels of types (protocols) supported by a proxy must be equal to the requested types and levels of anonymity. By default, strict mode is off and for a successful check is enough to satisfy any one of the requested types :param list dnsbl: (optional) Spam databases for proxy checking. `Wiki <https://en.wikipedia.org/wiki/DNSBL>`_ :param int limit: (optional) The maximum number of proxies :raises ValueError: If :attr:`types` not given. .. versionchanged:: 0.2.0 Added: :attr:`post`, :attr:`strict`, :attr:`dnsbl`. Changed: :attr:`types` is required. """ ip = await self._resolver.get_real_ext_ip() types = _update_types(types) if not types: raise ValueError('`types` is required')
self._checker = Checker(
0
2023-11-05 13:28:57+00:00
16k
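A minimal usage sketch of the Broker.find() flow shown in the record above. This is hypothetical: the import path proxyhub.Broker is assumed from the ":ref:`proxyhub-examples-find`" docstring references, and the convention that the broker puts None on the queue when it finishes is assumed from the upstream proxybroker project rather than shown in the snippet.

import asyncio
from proxyhub import Broker  # assumed package path, see docstring cross-references above

async def consume(proxies):
    # Drain checked proxies from the shared queue until the broker signals completion.
    while True:
        proxy = await proxies.get()
        if proxy is None:  # assumed end-of-stream sentinel (proxybroker convention)
            break
        print(proxy)

proxies = asyncio.Queue()
broker = Broker(proxies, timeout=8, max_conn=200, max_tries=3)
loop = asyncio.get_event_loop_policy().get_event_loop()
loop.run_until_complete(asyncio.gather(
    broker.find(types=['HTTP', 'HTTPS'], limit=10),  # `types` is required, per the ValueError above
    consume(proxies),
))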
TheFunny/ArisuAutoSweeper
module/device/method/minitouch.py
[ { "identifier": "Config", "path": "module/base/decorator.py", "snippet": "class Config:\n \"\"\"\n Decorator that calls different function with a same name according to config.\n\n func_list likes:\n func_list = {\n 'func1': [\n {'options': {'ENABLE': True}, 'func': 1},\n {'options': {'ENABLE': False}, 'func': 1}\n ]\n }\n \"\"\"\n func_list = {}\n\n @classmethod\n def when(cls, **kwargs):\n \"\"\"\n Args:\n **kwargs: Any option in AzurLaneConfig.\n\n Examples:\n @Config.when(USE_ONE_CLICK_RETIREMENT=True)\n def retire_ships(self, amount=None, rarity=None):\n pass\n\n @Config.when(USE_ONE_CLICK_RETIREMENT=False)\n def retire_ships(self, amount=None, rarity=None):\n pass\n \"\"\"\n from module.logger import logger\n options = kwargs\n\n def decorate(func):\n name = func.__name__\n data = {'options': options, 'func': func}\n if name not in cls.func_list:\n cls.func_list[name] = [data]\n else:\n override = False\n for record in cls.func_list[name]:\n if record['options'] == data['options']:\n record['func'] = data['func']\n override = True\n if not override:\n cls.func_list[name].append(data)\n\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n \"\"\"\n Args:\n self: ModuleBase instance.\n *args:\n **kwargs:\n \"\"\"\n for record in cls.func_list[name]:\n\n flag = [value is None or self.config.__getattribute__(key) == value\n for key, value in record['options'].items()]\n if not all(flag):\n continue\n\n return record['func'](self, *args, **kwargs)\n\n logger.warning(f'No option fits for {name}, using the last define func.')\n return func(self, *args, **kwargs)\n\n return wrapper\n\n return decorate" }, { "identifier": "cached_property", "path": "module/base/decorator.py", "snippet": "class cached_property(Generic[T]):\n \"\"\"\n cached-property from https://github.com/pydanny/cached-property\n Add typing support\n\n A property that is only computed once per instance and then replaces itself\n with an ordinary attribute. Deleting the attribute resets the property.\n Source: https://github.com/bottlepy/bottle/commit/fa7733e075da0d790d809aa3d2f53071897e6f76\n \"\"\"\n\n def __init__(self, func: Callable[..., T]):\n self.func = func\n\n def __get__(self, obj, cls) -> T:\n if obj is None:\n return self\n\n value = obj.__dict__[self.func.__name__] = self.func(obj)\n return value" }, { "identifier": "del_cached_property", "path": "module/base/decorator.py", "snippet": "def del_cached_property(obj, name):\n \"\"\"\n Delete a cached property safely.\n\n Args:\n obj:\n name (str):\n \"\"\"\n try:\n del obj.__dict__[name]\n except KeyError:\n pass" }, { "identifier": "Timer", "path": "module/base/timer.py", "snippet": "class Timer:\n def __init__(self, limit, count=0):\n \"\"\"\n Args:\n limit (int, float): Timer limit\n count (int): Timer reach confirm count. 
Default to 0.\n When using a structure like this, must set a count.\n Otherwise it goes wrong, if screenshot time cost greater than limit.\n\n if self.appear(MAIN_CHECK):\n if confirm_timer.reached():\n pass\n else:\n confirm_timer.reset()\n\n Also, It's a good idea to set `count`, to make alas run more stable on slow computers.\n Expected speed is 0.35 second / screenshot.\n \"\"\"\n self.limit = limit\n self.count = count\n self._current = 0\n self._reach_count = count\n\n def start(self):\n if not self.started():\n self._current = time.time()\n self._reach_count = 0\n\n return self\n\n def started(self):\n return bool(self._current)\n\n def current(self):\n \"\"\"\n Returns:\n float\n \"\"\"\n if self.started():\n return time.time() - self._current\n else:\n return 0.\n\n def set_current(self, current, count=0):\n self._current = time.time() - current\n self._reach_count = count\n\n def reached(self):\n \"\"\"\n Returns:\n bool\n \"\"\"\n self._reach_count += 1\n return time.time() - self._current > self.limit and self._reach_count > self.count\n\n def reset(self):\n self._current = time.time()\n self._reach_count = 0\n return self\n\n def clear(self):\n self._current = 0\n self._reach_count = self.count\n return self\n\n def reached_and_reset(self):\n \"\"\"\n Returns:\n bool:\n \"\"\"\n if self.reached():\n self.reset()\n return True\n else:\n return False\n\n def wait(self):\n \"\"\"\n Wait until timer reached.\n \"\"\"\n diff = self._current + self.limit - time.time()\n if diff > 0:\n time.sleep(diff)\n\n def show(self):\n from module.logger import logger\n logger.info(str(self))\n\n def __str__(self):\n return f'Timer(limit={round(self.current(), 3)}/{self.limit}, count={self._reach_count}/{self.count})'\n\n __repr__ = __str__" }, { "identifier": "Connection", "path": "module/device/connection.py", "snippet": "class Connection(ConnectionAttr):\n def __init__(self, config):\n \"\"\"\n Args:\n config (AzurLaneConfig, str): Name of the user config under ./config\n \"\"\"\n super().__init__(config)\n if not self.is_over_http:\n self.detect_device()\n\n # Connect\n self.adb_connect(self.serial)\n logger.attr('AdbDevice', self.adb)\n\n # Package\n if self.config.Emulator_PackageName == 'auto':\n self.detect_package()\n else:\n self.package = server_.to_package(self.config.Emulator_PackageName)\n # No set_server cause game client and UI language can be different\n # else:\n # set_server(self.package)\n logger.attr('Server', self.config.Emulator_PackageName)\n server_.server = self.config.Emulator_PackageName\n logger.attr('PackageName', self.package)\n server_.lang = self.config.Emulator_GameLanguage\n logger.attr('Lang', self.config.LANG)\n\n self.check_mumu_app_keep_alive()\n\n @Config.when(DEVICE_OVER_HTTP=False)\n def adb_command(self, cmd, timeout=10):\n \"\"\"\n Execute ADB commands in a subprocess,\n usually to be used when pulling or pushing large files.\n\n Args:\n cmd (list):\n timeout (int):\n\n Returns:\n str:\n \"\"\"\n cmd = list(map(str, cmd))\n cmd = [self.adb_binary, '-s', self.serial] + cmd\n logger.info(f'Execute: {cmd}')\n\n # Use shell=True to disable console window when using GUI.\n # Although, there's still a window when you stop running in GUI, which cause by gooey.\n # To disable it, edit gooey/gui/util/taskkill.py\n\n # No gooey anymore, just shell=False\n process = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=False)\n try:\n stdout, stderr = process.communicate(timeout=timeout)\n except subprocess.TimeoutExpired:\n process.kill()\n stdout, stderr = 
process.communicate()\n logger.warning(f'TimeoutExpired when calling {cmd}, stdout={stdout}, stderr={stderr}')\n return stdout\n\n @Config.when(DEVICE_OVER_HTTP=True)\n def adb_command(self, cmd, timeout=10):\n logger.warning(\n f'adb_command() is not available when connecting over http: {self.serial}, '\n )\n raise RequestHumanTakeover\n\n @Config.when(DEVICE_OVER_HTTP=False)\n def adb_shell(self, cmd, stream=False, recvall=True, timeout=10, rstrip=True):\n \"\"\"\n Equivalent to `adb -s <serial> shell <*cmd>`\n\n Args:\n cmd (list, str):\n stream (bool): Return stream instead of string output (Default: False)\n recvall (bool): Receive all data when stream=True (Default: True)\n timeout (int): (Default: 10)\n rstrip (bool): Strip the last empty line (Default: True)\n\n Returns:\n str if stream=False\n bytes if stream=True and recvall=True\n socket if stream=True and recvall=False\n \"\"\"\n if not isinstance(cmd, str):\n cmd = list(map(str, cmd))\n\n if stream:\n result = self.adb.shell(cmd, stream=stream, timeout=timeout, rstrip=rstrip)\n if recvall:\n # bytes\n return recv_all(result)\n else:\n # socket\n return result\n else:\n result = self.adb.shell(cmd, stream=stream, timeout=timeout, rstrip=rstrip)\n result = remove_shell_warning(result)\n # str\n return result\n\n @Config.when(DEVICE_OVER_HTTP=True)\n def adb_shell(self, cmd, stream=False, recvall=True, timeout=10, rstrip=True):\n \"\"\"\n Equivalent to http://127.0.0.1:7912/shell?command={command}\n\n Args:\n cmd (list, str):\n stream (bool): Return stream instead of string output (Default: False)\n recvall (bool): Receive all data when stream=True (Default: True)\n timeout (int): (Default: 10)\n rstrip (bool): Strip the last empty line (Default: True)\n\n Returns:\n str if stream=False\n bytes if stream=True\n \"\"\"\n if not isinstance(cmd, str):\n cmd = list(map(str, cmd))\n\n if stream:\n result = self.u2.shell(cmd, stream=stream, timeout=timeout)\n # Already received all, so `recvall` is ignored\n result = remove_shell_warning(result.content)\n # bytes\n return result\n else:\n result = self.u2.shell(cmd, stream=stream, timeout=timeout).output\n if rstrip:\n result = result.rstrip()\n result = remove_shell_warning(result)\n # str\n return result\n\n def adb_getprop(self, name):\n \"\"\"\n Get system property in Android, same as `getprop <name>`\n\n Args:\n name (str): Property name\n\n Returns:\n str:\n \"\"\"\n return self.adb_shell(['getprop', name]).strip()\n\n @cached_property\n def cpu_abi(self) -> str:\n \"\"\"\n Returns:\n str: arm64-v8a, armeabi-v7a, x86, x86_64\n \"\"\"\n abi = self.adb_getprop('ro.product.cpu.abi')\n if not len(abi):\n logger.error(f'CPU ABI invalid: \"{abi}\"')\n return abi\n\n @cached_property\n def sdk_ver(self) -> int:\n \"\"\"\n Android SDK/API levels, see https://apilevels.com/\n \"\"\"\n sdk = self.adb_getprop('ro.build.version.sdk')\n try:\n return int(sdk)\n except ValueError:\n logger.error(f'SDK version invalid: {sdk}')\n\n return 0\n\n @cached_property\n def is_avd(self):\n if get_serial_pair(self.serial)[0] is None:\n return False\n if 'ranchu' in self.adb_getprop('ro.hardware'):\n return True\n if 'goldfish' in self.adb_getprop('ro.hardware.audio.primary'):\n return True\n return False\n\n def check_mumu_app_keep_alive(self):\n if not self.is_mumu_family:\n return False\n\n res = self.adb_getprop('nemud.app_keep_alive')\n logger.attr('nemud.app_keep_alive', res)\n if res == '':\n # Empry property, might not be a mumu emulator or might be an old mumu\n return True\n elif res == 
'false':\n # Disabled\n return True\n elif res == 'true':\n # https://mumu.163.com/help/20230802/35047_1102450.html\n logger.critical('请在MuMu模拟器设置内关闭 \"后台挂机时保活运行\"')\n raise RequestHumanTakeover\n else:\n logger.warning(f'Invalid nemud.app_keep_alive value: {res}')\n return False\n\n @cached_property\n def _nc_server_host_port(self):\n \"\"\"\n Returns:\n str, int, str, int:\n server_listen_host, server_listen_port, client_connect_host, client_connect_port\n \"\"\"\n # For BlueStacks hyper-v, use ADB reverse\n if self.is_bluestacks_hyperv:\n host = '127.0.0.1'\n logger.info(f'Connecting to BlueStacks hyper-v, using host {host}')\n port = self.adb_reverse(f'tcp:{self.config.REVERSE_SERVER_PORT}')\n return host, port, host, self.config.REVERSE_SERVER_PORT\n # For emulators, listen on current host\n if self.is_emulator or self.is_over_http:\n try:\n host = socket.gethostbyname(socket.gethostname())\n except socket.gaierror as e:\n logger.error(e)\n logger.error(f'Unknown host name: {socket.gethostname()}')\n host = '127.0.0.1'\n if platform.system() == 'Linux' and host == '127.0.1.1':\n host = '127.0.0.1'\n logger.info(f'Connecting to local emulator, using host {host}')\n port = random_port(self.config.FORWARD_PORT_RANGE)\n\n # For AVD instance\n if self.is_avd:\n return host, port, \"10.0.2.2\", port\n\n return host, port, host, port\n # For local network devices, listen on the host under the same network as target device\n if self.is_network_device:\n hosts = socket.gethostbyname_ex(socket.gethostname())[2]\n logger.info(f'Current hosts: {hosts}')\n ip = ipaddress.ip_address(self.serial.split(':')[0])\n for host in hosts:\n if ip in ipaddress.ip_interface(f'{host}/24').network:\n logger.info(f'Connecting to local network device, using host {host}')\n port = random_port(self.config.FORWARD_PORT_RANGE)\n return host, port, host, port\n # For other devices, create an ADB reverse and listen on 127.0.0.1\n host = '127.0.0.1'\n logger.info(f'Connecting to unknown device, using host {host}')\n port = self.adb_reverse(f'tcp:{self.config.REVERSE_SERVER_PORT}')\n return host, port, host, self.config.REVERSE_SERVER_PORT\n\n @cached_property\n def reverse_server(self):\n \"\"\"\n Setup a server on Alas, access it from emulator.\n This will bypass adb shell and be faster.\n \"\"\"\n del_cached_property(self, '_nc_server_host_port')\n host_port = self._nc_server_host_port\n logger.info(f'Reverse server listening on {host_port[0]}:{host_port[1]}, '\n f'client can send data to {host_port[2]}:{host_port[3]}')\n server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server.bind(host_port[:2])\n server.settimeout(5)\n server.listen(5)\n return server\n\n @cached_property\n def nc_command(self):\n \"\"\"\n Returns:\n list[str]: ['nc'] or ['busybox', 'nc']\n \"\"\"\n sdk = self.sdk_ver\n logger.info(f'sdk_ver: {sdk}')\n if sdk >= 28:\n # Android 9 emulators does not have `nc`, try `busybox nc`\n # BlueStacks Pie (Android 9) has `nc` but cannot send data, try `busybox nc` first\n trial = [\n ['busybox', 'nc'],\n ['nc'],\n ]\n else:\n trial = [\n ['nc'],\n ['busybox', 'nc'],\n ]\n for command in trial:\n # About 3ms\n result = self.adb_shell(command)\n # Result should be command help if success\n # `/system/bin/sh: nc: not found`\n if 'not found' in result:\n continue\n # `/system/bin/sh: busybox: inaccessible or not found\\n`\n if 'inaccessible' in result:\n continue\n logger.attr('nc command', command)\n return command\n\n logger.error('No `netcat` command available, please use screenshot methods without 
`_nc` suffix')\n raise RequestHumanTakeover\n\n def adb_shell_nc(self, cmd, timeout=5, chunk_size=262144):\n \"\"\"\n Args:\n cmd (list):\n timeout (int):\n chunk_size (int): Default to 262144\n\n Returns:\n bytes:\n \"\"\"\n # Server start listening\n server = self.reverse_server\n server.settimeout(timeout)\n # Client send data, waiting for server accept\n # <command> | nc 127.0.0.1 {port}\n cmd += [\"|\", *self.nc_command, *self._nc_server_host_port[2:]]\n stream = self.adb_shell(cmd, stream=True, recvall=False)\n try:\n # Server accept connection\n conn, conn_port = server.accept()\n except socket.timeout:\n output = recv_all(stream, chunk_size=chunk_size)\n logger.warning(str(output))\n raise AdbTimeout('reverse server accept timeout')\n\n # Server receive data\n data = recv_all(conn, chunk_size=chunk_size, recv_interval=0.001)\n\n # Server close connection\n conn.close()\n return data\n\n def adb_exec_out(self, cmd, serial=None):\n cmd.insert(0, 'exec-out')\n return self.adb_command(cmd, serial)\n\n def adb_forward(self, remote):\n \"\"\"\n Do `adb forward <local> <remote>`.\n choose a random port in FORWARD_PORT_RANGE or reuse an existing forward,\n and also remove redundant forwards.\n\n Args:\n remote (str):\n tcp:<port>\n localabstract:<unix domain socket name>\n localreserved:<unix domain socket name>\n localfilesystem:<unix domain socket name>\n dev:<character device name>\n jdwp:<process pid> (remote only)\n\n Returns:\n int: Port\n \"\"\"\n port = 0\n for forward in self.adb.forward_list():\n if forward.serial == self.serial and forward.remote == remote and forward.local.startswith('tcp:'):\n if not port:\n logger.info(f'Reuse forward: {forward}')\n port = int(forward.local[4:])\n else:\n logger.info(f'Remove redundant forward: {forward}')\n self.adb_forward_remove(forward.local)\n\n if port:\n return port\n else:\n # Create new forward\n port = random_port(self.config.FORWARD_PORT_RANGE)\n forward = ForwardItem(self.serial, f'tcp:{port}', remote)\n logger.info(f'Create forward: {forward}')\n self.adb.forward(forward.local, forward.remote)\n return port\n\n def adb_reverse(self, remote):\n port = 0\n for reverse in self.adb.reverse_list():\n if reverse.remote == remote and reverse.local.startswith('tcp:'):\n if not port:\n logger.info(f'Reuse reverse: {reverse}')\n port = int(reverse.local[4:])\n else:\n logger.info(f'Remove redundant forward: {reverse}')\n self.adb_forward_remove(reverse.local)\n\n if port:\n return port\n else:\n # Create new reverse\n port = random_port(self.config.FORWARD_PORT_RANGE)\n reverse = ReverseItem(f'tcp:{port}', remote)\n logger.info(f'Create reverse: {reverse}')\n self.adb.reverse(reverse.local, reverse.remote)\n return port\n\n def adb_forward_remove(self, local):\n \"\"\"\n Equivalent to `adb -s <serial> forward --remove <local>`\n More about the commands send to ADB server, see:\n https://cs.android.com/android/platform/superproject/+/master:packages/modules/adb/SERVICES.TXT\n\n Args:\n local (str): Such as 'tcp:2437'\n \"\"\"\n with self.adb_client._connect() as c:\n list_cmd = f\"host-serial:{self.serial}:killforward:{local}\"\n c.send_command(list_cmd)\n c.check_okay()\n\n def adb_reverse_remove(self, local):\n \"\"\"\n Equivalent to `adb -s <serial> reverse --remove <local>`\n\n Args:\n local (str): Such as 'tcp:2437'\n \"\"\"\n with self.adb_client._connect() as c:\n c.send_command(f\"host:transport:{self.serial}\")\n c.check_okay()\n list_cmd = f\"reverse:killforward:{local}\"\n c.send_command(list_cmd)\n c.check_okay()\n\n def 
adb_push(self, local, remote):\n \"\"\"\n Args:\n local (str):\n remote (str):\n\n Returns:\n str:\n \"\"\"\n cmd = ['push', local, remote]\n return self.adb_command(cmd)\n\n @Config.when(DEVICE_OVER_HTTP=False)\n def adb_connect(self, serial):\n \"\"\"\n Connect to a serial, try 3 times at max.\n If there's an old ADB server running while Alas is using a newer one, which happens on Chinese emulators,\n the first connection is used to kill the other one, and the second is the real connect.\n\n Args:\n serial (str):\n\n Returns:\n bool: If success\n \"\"\"\n # Disconnect offline device before connecting\n for device in self.list_device():\n if device.status == 'offline':\n logger.warning(f'Device {serial} is offline, disconnect it before connecting')\n self.adb_disconnect(serial)\n elif device.status == 'unauthorized':\n logger.error(f'Device {serial} is unauthorized, please accept ADB debugging on your device')\n elif device.status == 'device':\n pass\n else:\n logger.warning(f'Device {serial} is is having a unknown status: {device.status}')\n\n # Skip for emulator-5554\n if 'emulator-' in serial:\n logger.info(f'\"{serial}\" is a `emulator-*` serial, skip adb connect')\n return True\n if re.match(r'^[a-zA-Z0-9]+$', serial):\n logger.info(f'\"{serial}\" seems to be a Android serial, skip adb connect')\n return True\n\n # Try to connect\n for _ in range(3):\n msg = self.adb_client.connect(serial)\n logger.info(msg)\n if 'connected' in msg:\n # Connected to 127.0.0.1:59865\n # Already connected to 127.0.0.1:59865\n return True\n elif 'bad port' in msg:\n # bad port number '598265' in '127.0.0.1:598265'\n logger.error(msg)\n possible_reasons('Serial incorrect, might be a typo')\n raise RequestHumanTakeover\n elif '(10061)' in msg:\n # cannot connect to 127.0.0.1:55555:\n # No connection could be made because the target machine actively refused it. 
(10061)\n logger.info(msg)\n logger.warning('No such device exists, please restart the emulator or set a correct serial')\n raise EmulatorNotRunningError\n\n # Failed to connect\n logger.warning(f'Failed to connect {serial} after 3 trial, assume connected')\n self.detect_device()\n return False\n\n @Config.when(DEVICE_OVER_HTTP=True)\n def adb_connect(self, serial):\n # No adb connect if over http\n return True\n\n def adb_disconnect(self, serial):\n msg = self.adb_client.disconnect(serial)\n if msg:\n logger.info(msg)\n\n del_cached_property(self, 'hermit_session')\n del_cached_property(self, 'droidcast_session')\n del_cached_property(self, 'minitouch_builder')\n del_cached_property(self, 'reverse_server')\n\n def adb_restart(self):\n \"\"\"\n Reboot adb client\n \"\"\"\n logger.info('Restart adb')\n # Kill current client\n self.adb_client.server_kill()\n # Init adb client\n del_cached_property(self, 'adb_client')\n _ = self.adb_client\n\n @Config.when(DEVICE_OVER_HTTP=False)\n def adb_reconnect(self):\n \"\"\"\n Reboot adb client if no device found, otherwise try reconnecting device.\n \"\"\"\n if self.config.Emulator_AdbRestart and len(self.list_device()) == 0:\n # Restart Adb\n self.adb_restart()\n # Connect to device\n self.adb_connect(self.serial)\n self.detect_device()\n else:\n self.adb_disconnect(self.serial)\n self.adb_connect(self.serial)\n self.detect_device()\n\n @Config.when(DEVICE_OVER_HTTP=True)\n def adb_reconnect(self):\n logger.warning(\n f'When connecting a device over http: {self.serial} '\n f'adb_reconnect() is skipped, you may need to restart ATX manually'\n )\n\n def install_uiautomator2(self):\n \"\"\"\n Init uiautomator2 and remove minicap.\n \"\"\"\n logger.info('Install uiautomator2')\n init = u2.init.Initer(self.adb, loglevel=logging.DEBUG)\n # MuMu X has no ro.product.cpu.abi, pick abi from ro.product.cpu.abilist\n if init.abi not in ['x86_64', 'x86', 'arm64-v8a', 'armeabi-v7a', 'armeabi']:\n init.abi = init.abis[0]\n init.set_atx_agent_addr('127.0.0.1:7912')\n try:\n init.install()\n except ConnectionError:\n u2.init.GITHUB_BASEURL = 'http://tool.appetizer.io/openatx'\n init.install()\n self.uninstall_minicap()\n\n def uninstall_minicap(self):\n \"\"\" minicap can't work or will send compressed images on some emulators. 
\"\"\"\n logger.info('Removing minicap')\n self.adb_shell([\"rm\", \"/data/local/tmp/minicap\"])\n self.adb_shell([\"rm\", \"/data/local/tmp/minicap.so\"])\n\n @Config.when(DEVICE_OVER_HTTP=False)\n def restart_atx(self):\n \"\"\"\n Minitouch supports only one connection at a time.\n Restart ATX to kick the existing one.\n \"\"\"\n logger.info('Restart ATX')\n atx_agent_path = '/data/local/tmp/atx-agent'\n self.adb_shell([atx_agent_path, 'server', '--stop'])\n self.adb_shell([atx_agent_path, 'server', '--nouia', '-d', '--addr', '127.0.0.1:7912'])\n\n @Config.when(DEVICE_OVER_HTTP=True)\n def restart_atx(self):\n logger.warning(\n f'When connecting a device over http: {self.serial} '\n f'restart_atx() is skipped, you may need to restart ATX manually'\n )\n\n @staticmethod\n def sleep(second):\n \"\"\"\n Args:\n second(int, float, tuple):\n \"\"\"\n time.sleep(ensure_time(second))\n\n _orientation_description = {\n 0: 'Normal',\n 1: 'HOME key on the right',\n 2: 'HOME key on the top',\n 3: 'HOME key on the left',\n }\n orientation = 0\n\n @retry\n def get_orientation(self):\n \"\"\"\n Rotation of the phone\n\n Returns:\n int:\n 0: 'Normal'\n 1: 'HOME key on the right'\n 2: 'HOME key on the top'\n 3: 'HOME key on the left'\n \"\"\"\n _DISPLAY_RE = re.compile(\n r'.*DisplayViewport{.*valid=true, .*orientation=(?P<orientation>\\d+), .*deviceWidth=(?P<width>\\d+), deviceHeight=(?P<height>\\d+).*'\n )\n output = self.adb_shell(['dumpsys', 'display'])\n\n res = _DISPLAY_RE.search(output, 0)\n\n if res:\n o = int(res.group('orientation'))\n if o in Connection._orientation_description:\n pass\n else:\n o = 0\n logger.warning(f'Invalid device orientation: {o}, assume it is normal')\n else:\n o = 0\n logger.warning('Unable to get device orientation, assume it is normal')\n\n self.orientation = o\n logger.attr('Device Orientation', f'{o} ({Connection._orientation_description.get(o, \"Unknown\")})')\n return o\n\n @retry\n def list_device(self):\n \"\"\"\n Returns:\n SelectedGrids[AdbDeviceWithStatus]:\n \"\"\"\n devices = []\n try:\n with self.adb_client._connect() as c:\n c.send_command(\"host:devices\")\n c.check_okay()\n output = c.read_string_block()\n for line in output.splitlines():\n parts = line.strip().split(\"\\t\")\n if len(parts) != 2:\n continue\n device = AdbDeviceWithStatus(self.adb_client, parts[0], parts[1])\n devices.append(device)\n except ConnectionResetError as e:\n # Happens only on CN users.\n # ConnectionResetError: [WinError 10054] 远程主机强迫关闭了一个现有的连接。\n logger.error(e)\n if '强迫关闭' in str(e):\n logger.critical('无法连接至ADB服务,请关闭UU加速器、原神私服、以及一些劣质代理软件。'\n '它们会劫持电脑上所有的网络连接,包括Alas与模拟器之间的本地连接。')\n return SelectedGrids(devices)\n\n def detect_device(self):\n \"\"\"\n Find available devices\n If serial=='auto' and only 1 device detected, use it\n \"\"\"\n logger.hr('Detect device')\n logger.info('Here are the available devices, '\n 'copy to Alas.Emulator.Serial to use it or set Alas.Emulator.Serial=\"auto\"')\n devices = self.list_device()\n\n # Show available devices\n available = devices.select(status='device')\n for device in available:\n logger.info(device.serial)\n if not len(available):\n logger.info('No available devices')\n\n # Show unavailable devices if having any\n unavailable = devices.delete(available)\n if len(unavailable):\n logger.info('Here are the devices detected but unavailable')\n for device in unavailable:\n logger.info(f'{device.serial} ({device.status})')\n\n # Auto device detection\n if self.config.Emulator_Serial == 'auto':\n if available.count == 0:\n 
logger.critical('No available device found, auto device detection cannot work, '\n 'please set an exact serial in Alas.Emulator.Serial instead of using \"auto\"')\n raise RequestHumanTakeover\n elif available.count == 1:\n logger.info(f'Auto device detection found only one device, using it')\n self.serial = devices[0].serial\n del_cached_property(self, 'adb')\n else:\n logger.critical('Multiple devices found, auto device detection cannot decide which to choose, '\n 'please copy one of the available devices listed above to Alas.Emulator.Serial')\n raise RequestHumanTakeover\n\n # Handle LDPlayer\n # LDPlayer serial jumps between `127.0.0.1:5555+{X}` and `emulator-5554+{X}`\n port_serial, emu_serial = get_serial_pair(self.serial)\n if port_serial and emu_serial:\n # Might be LDPlayer, check connected devices\n port_device = devices.select(serial=port_serial).first_or_none()\n emu_device = devices.select(serial=emu_serial).first_or_none()\n if port_device and emu_device:\n # Paired devices found, check status to get the correct one\n if port_device.status == 'device' and emu_device.status == 'offline':\n self.serial = port_serial\n logger.info(f'LDPlayer device pair found: {port_device}, {emu_device}. '\n f'Using serial: {self.serial}')\n elif port_device.status == 'offline' and emu_device.status == 'device':\n self.serial = emu_serial\n logger.info(f'LDPlayer device pair found: {port_device}, {emu_device}. '\n f'Using serial: {self.serial}')\n elif not devices.select(serial=self.serial):\n # Current serial not found\n if port_device and not emu_device:\n logger.info(f'Current serial {self.serial} not found but paired device {port_serial} found. '\n f'Using serial: {port_serial}')\n self.serial = port_serial\n if not port_device and emu_device:\n logger.info(f'Current serial {self.serial} not found but paired device {emu_serial} found. 
'\n f'Using serial: {emu_serial}')\n self.serial = emu_serial\n\n @retry\n def list_package(self, show_log=True):\n \"\"\"\n Find all packages on device.\n Use dumpsys first for faster.\n \"\"\"\n # 80ms\n if show_log:\n logger.info('Get package list')\n output = self.adb_shell(r'dumpsys package | grep \"Package \\[\"')\n packages = re.findall(r'Package \\[([^\\s]+)\\]', output)\n if len(packages):\n return packages\n\n # 200ms\n if show_log:\n logger.info('Get package list')\n output = self.adb_shell(['pm', 'list', 'packages'])\n packages = re.findall(r'package:([^\\s]+)', output)\n return packages\n\n def list_azurlane_packages(self, show_log=True):\n \"\"\"\n Args:\n show_log:\n\n Returns:\n list[str]: List of package names\n \"\"\"\n packages = self.list_package(show_log=show_log)\n packages = [p for p in packages if p in server_.VALID_PACKAGE]\n return packages\n\n def detect_package(self, set_config=True):\n \"\"\"\n Show all possible packages with the given keyword on this device.\n \"\"\"\n logger.hr('Detect package')\n packages = self.list_azurlane_packages()\n\n # Show packages\n logger.info(f'Here are the available packages in device \"{self.serial}\", '\n f'copy to Alas.Emulator.PackageName to use it')\n if len(packages):\n for package in packages:\n logger.info(package)\n else:\n logger.info(f'No available packages on device \"{self.serial}\"')\n\n # Auto package detection\n if len(packages) == 0:\n logger.critical(f'No Blue Archive package found, '\n f'please confirm Blue Archive has been installed on device \"{self.serial}\"')\n raise RequestHumanTakeover\n if len(packages) == 1:\n logger.info('Auto package detection found only one package, using it')\n self.package = packages[0]\n # Set config\n if set_config:\n self.config.Emulator_PackageName = server_.to_server(self.package)\n # Set server\n # logger.info('Server changed, release resources')\n # set_server(self.package)\n else:\n logger.critical(\n f'Multiple Blue Archive packages found, auto package detection cannot decide which to choose, '\n 'please copy one of the available devices listed above to Alas.Emulator.PackageName')\n raise RequestHumanTakeover" }, { "identifier": "RETRY_TRIES", "path": "module/device/method/utils.py", "snippet": "RETRY_TRIES = 5" }, { "identifier": "retry_sleep", "path": "module/device/method/utils.py", "snippet": "def retry_sleep(trial):\n # First trial\n if trial == 0:\n pass\n # Failed once, fast retry\n elif trial == 1:\n pass\n # Failed twice\n elif trial == 2:\n time.sleep(1)\n # Failed more\n else:\n time.sleep(RETRY_DELAY)" }, { "identifier": "handle_adb_error", "path": "module/device/method/utils.py", "snippet": "def handle_adb_error(e):\n \"\"\"\n Args:\n e (Exception):\n\n Returns:\n bool: If should retry\n \"\"\"\n text = str(e)\n if 'not found' in text:\n # When you call `adb disconnect <serial>`\n # Or when adb server was killed (low possibility)\n # AdbError(device '127.0.0.1:59865' not found)\n logger.error(e)\n return True\n elif 'timeout' in text:\n # AdbTimeout(adb read timeout)\n logger.error(e)\n return True\n elif 'closed' in text:\n # AdbError(closed)\n # Usually after AdbTimeout(adb read timeout)\n # Disconnect and re-connect should fix this.\n logger.error(e)\n return True\n elif 'device offline' in text:\n # AdbError(device offline)\n # When a device that has been connected wirelessly is disconnected passively,\n # it does not disappear from the adb device list,\n # but will be displayed as offline.\n # In many cases, such as disconnection and recovery caused by 
network fluctuations,\n # or after VMOS reboot when running Alas on a phone,\n # the device is still available, but it needs to be disconnected and re-connected.\n logger.error(e)\n return True\n elif 'is offline' in text:\n # RuntimeError: USB device 127.0.0.1:7555 is offline\n # Raised by uiautomator2 when current adb service is killed by another version of adb service.\n logger.error(e)\n return True\n elif 'unknown host service' in text:\n # AdbError(unknown host service)\n # Another version of ADB service started, current ADB service has been killed.\n # Usually because user opened a Chinese emulator, which uses ADB from the Stone Age.\n logger.error(e)\n return True\n else:\n # AdbError()\n logger.exception(e)\n possible_reasons(\n 'If you are using BlueStacks or LD player or WSA, please enable ADB in the settings of your emulator',\n 'Emulator died, please restart emulator',\n 'Serial incorrect, no such device exists or emulator is not running'\n )\n return False" }, { "identifier": "RequestHumanTakeover", "path": "module/exception.py", "snippet": "class RequestHumanTakeover(Exception):\n # Request human takeover\n # Alas is unable to handle such error, probably because of wrong settings.\n pass" }, { "identifier": "ScriptError", "path": "module/exception.py", "snippet": "class ScriptError(Exception):\n # This is likely to be a mistake of developers, but sometimes a random issue\n pass" }, { "identifier": "logger", "path": "module/logger/logger.py", "snippet": "def empty_function(*args, **kwargs):\n def __init__(self, *args, func: Callable[[ConsoleRenderable], None] = None, **kwargs):\n def emit(self, record: logging.LogRecord) -> None:\n def handle(self, record: logging.LogRecord) -> bool:\n def options(self) -> ConsoleOptions:\ndef _set_file_logger(name=pyw_name):\ndef set_file_logger(name=pyw_name):\ndef set_func_logger(func):\ndef _get_renderables(\n self: Console, *objects, sep=\" \", end=\"\\n\", justify=None, emoji=None, markup=None, highlight=None,\n) -> List[ConsoleRenderable]:\ndef print(*objects: ConsoleRenderable, **kwargs):\ndef rule(title=\"\", *, characters=\"─\", style=\"rule.line\", end=\"\\n\", align=\"center\"):\ndef hr(title, level=3):\ndef attr(name, text):\ndef attr_align(name, text, front='', align=22):\ndef show():\ndef error_convert(func):\n def error_wrapper(msg, *args, **kwargs):\nclass RichFileHandler(RichHandler):\nclass RichRenderableHandler(RichHandler):\nclass HTMLConsole(Console):\nclass Highlighter(RegexHighlighter):\nWEB_THEME = Theme({\n \"web.brace\": Style(bold=True),\n \"web.bool_true\": Style(color=\"bright_green\", italic=True),\n \"web.bool_false\": Style(color=\"bright_red\", italic=True),\n \"web.none\": Style(color=\"magenta\", italic=True),\n \"web.path\": Style(color=\"magenta\"),\n \"web.filename\": Style(color=\"bright_magenta\"),\n \"web.str\": Style(color=\"green\", italic=False, bold=False),\n \"web.time\": Style(color=\"cyan\"),\n \"rule.text\": Style(bold=True),\n})" } ]
import asyncio
import json
import re
import socket
import time

import websockets
from functools import wraps
from typing import List
from adbutils.errors import AdbError
from uiautomator2 import _Service

from module.base.decorator import Config, cached_property, del_cached_property
from module.base.timer import Timer
from module.base.utils import *
from module.device.connection import Connection
from module.device.method.utils import RETRY_TRIES, retry_sleep, handle_adb_error
from module.exception import RequestHumanTakeover, ScriptError
from module.logger import logger
12,465
""" DEFAULT_DELAY = 0.05 max_x = 1280 max_y = 720 def __init__(self, device, contact=0, handle_orientation=True): """ Args: device: """ self.device = device self.commands = [] self.delay = 0 self.contact = contact self.handle_orientation = handle_orientation @property def orientation(self): if self.handle_orientation: return self.device.orientation else: return 0 def convert(self, x, y): max_x, max_y = self.device.max_x, self.device.max_y orientation = self.orientation if orientation == 0: pass elif orientation == 1: x, y = 720 - y, x max_x, max_y = max_y, max_x elif orientation == 2: x, y = 1280 - x, 720 - y elif orientation == 3: x, y = y, 1280 - x max_x, max_y = max_y, max_x else: raise ScriptError(f'Invalid device orientation: {orientation}') self.max_x, self.max_y = max_x, max_y if not self.device.config.DEVICE_OVER_HTTP: # Maximum X and Y coordinates may, but usually do not, match the display size. x, y = int(x / 1280 * max_x), int(y / 720 * max_y) else: # When over http, max_x and max_y are default to 1280 and 720, skip matching display size x, y = int(x), int(y) return x, y def commit(self): """ add minitouch command: 'c\n' """ self.commands.append(Command('c')) return self def reset(self): """ add minitouch command: 'r\n' """ self.commands.append(Command('r')) return self def wait(self, ms=10): """ add minitouch command: 'w <ms>\n' """ self.commands.append(Command('w', ms=ms)) self.delay += ms return self def up(self): """ add minitouch command: 'u <contact>\n' """ self.commands.append(Command('u', contact=self.contact)) return self def down(self, x, y, pressure=100): """ add minitouch command: 'd <contact> <x> <y> <pressure>\n' """ x, y = self.convert(x, y) self.commands.append(Command('d', x=x, y=y, contact=self.contact, pressure=pressure)) return self def move(self, x, y, pressure=100): """ add minitouch command: 'm <contact> <x> <y> <pressure>\n' """ x, y = self.convert(x, y) self.commands.append(Command('m', x=x, y=y, contact=self.contact, pressure=pressure)) return self def clear(self): """ clear current commands """ self.commands = [] self.delay = 0 def to_minitouch(self) -> str: return ''.join([command.to_minitouch() for command in self.commands]) def to_atx_agent(self) -> List[str]: return [command.to_atx_agent(self.max_x, self.max_y) for command in self.commands] def send(self): return self.device.minitouch_send(builder=self) class MinitouchNotInstalledError(Exception): pass class MinitouchOccupiedError(Exception): pass class U2Service(_Service): def __init__(self, name, u2obj): self.name = name self.u2obj = u2obj self.service_url = self.u2obj.path2url("/services/" + name) def retry(func): @wraps(func) def retry_wrapper(self, *args, **kwargs): """ Args: self (Minitouch): """ init = None
def random_normal_distribution(a, b, n=5): output = np.mean(np.random.uniform(a, b, size=n)) return output def random_theta(): theta = np.random.uniform(0, 2 * np.pi) return np.array([np.sin(theta), np.cos(theta)]) def random_rho(dis): return random_normal_distribution(-dis, dis) def insert_swipe(p0, p3, speed=15, min_distance=10): """ Insert way point from start to end. First generate a cubic bézier curve Args: p0: Start point. p3: End point. speed: Average move speed, pixels per 10ms. min_distance: Returns: list[list[int]]: List of points. Examples: > insert_swipe((400, 400), (600, 600), speed=20) [[400, 400], [406, 406], [416, 415], [429, 428], [444, 442], [462, 459], [481, 478], [504, 500], [527, 522], [545, 540], [560, 557], [573, 570], [584, 582], [592, 590], [597, 596], [600, 600]] """ p0 = np.array(p0) p3 = np.array(p3) # Random control points in Bézier curve distance = np.linalg.norm(p3 - p0) p1 = 2 / 3 * p0 + 1 / 3 * p3 + random_theta() * random_rho(distance * 0.1) p2 = 1 / 3 * p0 + 2 / 3 * p3 + random_theta() * random_rho(distance * 0.1) # Random `t` on Bézier curve, sparse in the middle, dense at start and end segments = max(int(distance / speed) + 1, 5) lower = random_normal_distribution(-85, -60) upper = random_normal_distribution(80, 90) theta = np.arange(lower + 0., upper + 0.0001, (upper - lower) / segments) ts = np.sin(theta / 180 * np.pi) ts = np.sign(ts) * abs(ts) ** 0.9 ts = (ts - min(ts)) / (max(ts) - min(ts)) # Generate cubic Bézier curve points = [] prev = (-100, -100) for t in ts: point = p0 * (1 - t) ** 3 + 3 * p1 * t * (1 - t) ** 2 + 3 * p2 * t ** 2 * (1 - t) + p3 * t ** 3 point = point.astype(int).tolist() if np.linalg.norm(np.subtract(point, prev)) < min_distance: continue points.append(point) prev = point # Delete nearing points if len(points[1:]): distance = np.linalg.norm(np.subtract(points[1:], points[0]), axis=1) mask = np.append(True, distance > min_distance) points = np.array(points)[mask].tolist() else: points = [p0, p3] return points class Command: def __init__( self, operation: str, contact: int = 0, x: int = 0, y: int = 0, ms: int = 10, pressure: int = 100 ): """ See https://github.com/openstf/minitouch#writable-to-the-socket Args: operation: c, r, d, m, u, w contact: x: y: ms: pressure: """ self.operation = operation self.contact = contact self.x = x self.y = y self.ms = ms self.pressure = pressure def to_minitouch(self) -> str: """ String that write into minitouch socket """ if self.operation == 'c': return f'{self.operation}\n' elif self.operation == 'r': return f'{self.operation}\n' elif self.operation == 'd': return f'{self.operation} {self.contact} {self.x} {self.y} {self.pressure}\n' elif self.operation == 'm': return f'{self.operation} {self.contact} {self.x} {self.y} {self.pressure}\n' elif self.operation == 'u': return f'{self.operation} {self.contact}\n' elif self.operation == 'w': return f'{self.operation} {self.ms}\n' else: return '' def to_atx_agent(self, max_x=1280, max_y=720) -> str: """ Dict that send to atx-agent, $DEVICE_URL/minitouch See https://github.com/openatx/atx-agent#minitouch%E6%93%8D%E4%BD%9C%E6%96%B9%E6%B3%95 """ x, y = self.x / max_x, self.y / max_y if self.operation == 'c': out = dict(operation=self.operation) elif self.operation == 'r': out = dict(operation=self.operation) elif self.operation == 'd': out = dict(operation=self.operation, index=self.contact, pressure=self.pressure, xP=x, yP=y) elif self.operation == 'm': out = dict(operation=self.operation, index=self.contact, pressure=self.pressure, xP=x, yP=y) elif 
self.operation == 'u': out = dict(operation=self.operation, index=self.contact) elif self.operation == 'w': out = dict(operation=self.operation, milliseconds=self.ms) else: out = dict() return json.dumps(out) class CommandBuilder: """Build command str for minitouch. You can use this, to custom actions as you wish:: with safe_connection(_DEVICE_ID) as connection: builder = CommandBuilder() builder.down(0, 400, 400, 50) builder.commit() builder.move(0, 500, 500, 50) builder.commit() builder.move(0, 800, 400, 50) builder.commit() builder.up(0) builder.commit() builder.publish(connection) """ DEFAULT_DELAY = 0.05 max_x = 1280 max_y = 720 def __init__(self, device, contact=0, handle_orientation=True): """ Args: device: """ self.device = device self.commands = [] self.delay = 0 self.contact = contact self.handle_orientation = handle_orientation @property def orientation(self): if self.handle_orientation: return self.device.orientation else: return 0 def convert(self, x, y): max_x, max_y = self.device.max_x, self.device.max_y orientation = self.orientation if orientation == 0: pass elif orientation == 1: x, y = 720 - y, x max_x, max_y = max_y, max_x elif orientation == 2: x, y = 1280 - x, 720 - y elif orientation == 3: x, y = y, 1280 - x max_x, max_y = max_y, max_x else: raise ScriptError(f'Invalid device orientation: {orientation}') self.max_x, self.max_y = max_x, max_y if not self.device.config.DEVICE_OVER_HTTP: # Maximum X and Y coordinates may, but usually do not, match the display size. x, y = int(x / 1280 * max_x), int(y / 720 * max_y) else: # When over http, max_x and max_y are default to 1280 and 720, skip matching display size x, y = int(x), int(y) return x, y def commit(self): """ add minitouch command: 'c\n' """ self.commands.append(Command('c')) return self def reset(self): """ add minitouch command: 'r\n' """ self.commands.append(Command('r')) return self def wait(self, ms=10): """ add minitouch command: 'w <ms>\n' """ self.commands.append(Command('w', ms=ms)) self.delay += ms return self def up(self): """ add minitouch command: 'u <contact>\n' """ self.commands.append(Command('u', contact=self.contact)) return self def down(self, x, y, pressure=100): """ add minitouch command: 'd <contact> <x> <y> <pressure>\n' """ x, y = self.convert(x, y) self.commands.append(Command('d', x=x, y=y, contact=self.contact, pressure=pressure)) return self def move(self, x, y, pressure=100): """ add minitouch command: 'm <contact> <x> <y> <pressure>\n' """ x, y = self.convert(x, y) self.commands.append(Command('m', x=x, y=y, contact=self.contact, pressure=pressure)) return self def clear(self): """ clear current commands """ self.commands = [] self.delay = 0 def to_minitouch(self) -> str: return ''.join([command.to_minitouch() for command in self.commands]) def to_atx_agent(self) -> List[str]: return [command.to_atx_agent(self.max_x, self.max_y) for command in self.commands] def send(self): return self.device.minitouch_send(builder=self) class MinitouchNotInstalledError(Exception): pass class MinitouchOccupiedError(Exception): pass class U2Service(_Service): def __init__(self, name, u2obj): self.name = name self.u2obj = u2obj self.service_url = self.u2obj.path2url("/services/" + name) def retry(func): @wraps(func) def retry_wrapper(self, *args, **kwargs): """ Args: self (Minitouch): """ init = None
for _ in range(RETRY_TRIES):
5
2023-11-01 07:09:45+00:00
16k
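A short, hypothetical sketch of driving the CommandBuilder from the minitouch record above, assuming `device` is an already-connected Minitouch device object exposing max_x/max_y, orientation, config and minitouch_send() as in module/device/method/minitouch.py; everything else follows the methods shown in the snippet.

from module.device.method.minitouch import CommandBuilder, insert_swipe

def swipe_example(device):
    # Build a swipe along a bezier path from (400, 400) to (600, 600).
    builder = CommandBuilder(device, contact=0)
    points = insert_swipe((400, 400), (600, 600), speed=20)
    builder.down(*points[0]).commit().wait(10)
    for x, y in points[1:]:
        builder.move(x, y).commit().wait(10)
    builder.up().commit()
    builder.send()  # forwards to device.minitouch_send(builder=...)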
BrianPugh/cyclopts
tests/test_help.py
[ { "identifier": "App", "path": "cyclopts/core.py", "snippet": "class App:\n _name: Optional[Tuple[str, ...]] = field(default=None, alias=\"name\", converter=optional_to_tuple_converter)\n\n _help: Optional[str] = field(default=None, alias=\"help\")\n\n usage: Optional[str] = field(default=None)\n\n # Everything below must be kw_only\n\n default_command: Optional[Callable] = field(default=None, converter=_validate_default_command, kw_only=True)\n default_parameter: Optional[Parameter] = field(default=None, kw_only=True)\n\n version: Union[None, str, Callable] = field(factory=_default_version, kw_only=True)\n version_flags: Tuple[str, ...] = field(\n default=[\"--version\"],\n on_setattr=attrs.setters.frozen,\n converter=to_tuple_converter,\n kw_only=True,\n )\n\n show: bool = field(default=True, kw_only=True)\n\n help_flags: Tuple[str, ...] = field(\n default=[\"--help\", \"-h\"],\n on_setattr=attrs.setters.frozen,\n converter=to_tuple_converter,\n kw_only=True,\n )\n\n # This can ONLY ever be Tuple[Union[Group, str], ...] due to converter.\n # The other types is to make mypy happy for Cyclopts users.\n group: Union[Group, str, Tuple[Union[Group, str], ...]] = field(\n default=None, converter=to_tuple_converter, kw_only=True\n )\n\n group_arguments: Group = field(\n default=None,\n converter=GroupConverter(Group.create_default_arguments()),\n kw_only=True,\n )\n group_parameters: Group = field(\n default=None,\n converter=GroupConverter(Group.create_default_parameters()),\n kw_only=True,\n )\n group_commands: Group = field(\n default=None,\n converter=GroupConverter(Group.create_default_commands()),\n kw_only=True,\n )\n\n converter: Optional[Callable] = field(default=None, kw_only=True)\n validator: List[Callable] = field(default=None, converter=to_list_converter, kw_only=True)\n\n ######################\n # Private Attributes #\n ######################\n # Maps CLI-name of a command to a function handle.\n _commands: Dict[str, \"App\"] = field(init=False, factory=dict)\n\n _parents: List[\"App\"] = field(init=False, factory=list)\n\n _meta: \"App\" = field(init=False, default=None)\n _meta_parent: \"App\" = field(init=False, default=None)\n\n def __attrs_post_init__(self):\n if self.help_flags:\n self.command(\n self.help_print,\n name=self.help_flags,\n help_flags=[],\n version_flags=[],\n help=\"Display this message and exit.\",\n )\n if self.version_flags:\n self.command(\n self.version_print,\n name=self.version_flags,\n help_flags=[],\n version_flags=[],\n help=\"Display application version.\",\n )\n\n ###########\n # Methods #\n ###########\n\n @property\n def name(self) -> Tuple[str, ...]:\n \"\"\"Application name(s). 
Dynamically derived if not previously set.\"\"\"\n if self._name:\n return self._name\n elif self.default_command is None:\n name = Path(sys.argv[0]).name\n if name == \"__main__.py\":\n name = _get_root_module_name()\n return (name,)\n else:\n return (_format_name(self.default_command.__name__),)\n\n @property\n def help(self) -> str:\n if self._help is not None:\n return self._help\n elif self.default_command is None:\n # Try and fallback to a meta-app docstring.\n if self._meta is None:\n return \"\"\n else:\n return self.meta.help\n elif self.default_command.__doc__ is None:\n return \"\"\n else:\n return self.default_command.__doc__\n\n @help.setter\n def help(self, value):\n self._help = value\n\n def version_print(self) -> None:\n \"\"\"Print the application version.\"\"\"\n print(self.version() if callable(self.version) else self.version)\n\n def __getitem__(self, key: str) -> \"App\":\n \"\"\"Get the subapp from a command string.\n\n All commands get registered to Cyclopts as subapps.\n The actual function handler is at ``app[key].default_command``.\n \"\"\"\n if self._meta:\n with suppress(KeyError):\n return self.meta[key]\n return self._commands[key]\n\n def __contains__(self, k: str) -> bool:\n if k in self._commands:\n return True\n if self._meta_parent:\n return k in self._meta_parent\n return False\n\n @property\n def meta(self) -> \"App\":\n if self._meta is None:\n self._meta = type(self)(\n group_commands=copy(self.group_commands),\n group_arguments=copy(self.group_arguments),\n group_parameters=copy(self.group_parameters),\n )\n self._meta._meta_parent = self\n return self._meta\n\n def _parse_command_chain(self, tokens):\n command_chain = []\n app = self\n apps = [app]\n unused_tokens = tokens\n\n command_mapping = _combined_meta_command_mapping(app)\n\n for i, token in enumerate(tokens):\n if token in self.help_flags:\n break\n try:\n app = command_mapping[token]\n apps.append(app)\n unused_tokens = tokens[i + 1 :]\n except KeyError:\n break\n command_chain.append(token)\n command_mapping = _combined_meta_command_mapping(app)\n\n return command_chain, apps, unused_tokens\n\n def command(\n self,\n obj: Optional[Callable] = None,\n name: Union[None, str, Iterable[str]] = None,\n **kwargs,\n ) -> Callable:\n \"\"\"Decorator to register a function as a CLI command.\n\n Parameters\n ----------\n obj: Optional[Callable]\n Function or :class:`App` to be registered as a command.\n name: Union[None, str, Iterable[str]]\n Name(s) to register the ``obj`` to.\n If not provided, defaults to:\n\n * If registering an :class:`App`, then the app's name.\n * If registering a function, then the function's name.\n `**kwargs`\n Any argument that :class:`App` can take.\n \"\"\"\n if obj is None: # Called ``@app.command(...)``\n return partial(self.command, name=name, **kwargs)\n\n if isinstance(obj, App):\n app = obj\n\n if app._name is None and name is None:\n raise ValueError(\"Sub-app MUST have a name specified.\")\n\n if kwargs:\n raise ValueError(\"Cannot supplied additional configuration when registering a sub-App.\")\n else:\n validate_command(obj)\n kwargs.setdefault(\"help_flags\", [])\n kwargs.setdefault(\"version_flags\", [])\n if \"group_commands\" not in kwargs:\n kwargs[\"group_commands\"] = copy(self.group_commands)\n if \"group_parameters\" not in kwargs:\n kwargs[\"group_parameters\"] = copy(self.group_parameters)\n if \"group_arguments\" not in kwargs:\n kwargs[\"group_arguments\"] = copy(self.group_arguments)\n app = App(default_command=obj, **kwargs)\n # app.name is 
handled below\n\n if name is None:\n name = app.name\n else:\n app._name = name\n\n for n in to_tuple_converter(name):\n if n in self:\n raise CommandCollisionError(f'Command \"{n}\" already registered.')\n\n # Warning: app._name may not align with command name\n self._commands[n] = app\n\n app._parents.append(self)\n\n return obj\n\n def default(\n self,\n obj: Optional[Callable] = None,\n *,\n converter=None,\n validator=None,\n ):\n \"\"\"Decorator to register a function as the default action handler.\"\"\"\n if obj is None: # Called ``@app.default_command(...)``\n return partial(self.default, converter=converter, validator=validator)\n\n if isinstance(obj, App): # Registering a sub-App\n raise TypeError(\"Cannot register a sub-App to default.\")\n\n if self.default_command is not None:\n raise CommandCollisionError(f\"Default command previously set to {self.default_command}.\")\n\n validate_command(obj)\n self.default_command = obj\n if converter:\n self.converter = converter\n if validator:\n self.validator = validator\n return obj\n\n def parse_known_args(\n self,\n tokens: Union[None, str, Iterable[str]] = None,\n *,\n console: Optional[Console] = None,\n ) -> Tuple[Callable, inspect.BoundArguments, List[str]]:\n \"\"\"Interpret arguments into a function, :class:`~inspect.BoundArguments`, and any remaining unknown tokens.\n\n Parameters\n ----------\n tokens: Union[None, str, Iterable[str]]\n Either a string, or a list of strings to launch a command.\n Defaults to ``sys.argv[1:]``\n\n Returns\n -------\n command: Callable\n Bare function to execute.\n\n bound: inspect.BoundArguments\n Bound arguments for ``command``.\n\n unused_tokens: List[str]\n Any remaining CLI tokens that didn't get parsed for ``command``.\n \"\"\"\n tokens = normalize_tokens(tokens)\n\n command_chain, apps, unused_tokens = self._parse_command_chain(tokens)\n command_app = apps[-1]\n\n try:\n parent_app = apps[-2]\n except IndexError:\n parent_app = None\n\n try:\n if command_app.default_command:\n command = command_app.default_command\n resolved_command = ResolvedCommand(\n command,\n _resolve_default_parameter(apps),\n command_app.group_arguments,\n command_app.group_parameters,\n parse_docstring=False,\n )\n # We want the resolved group that ``app`` belongs to.\n if parent_app is None:\n command_groups = []\n else:\n command_groups = _get_command_groups(parent_app, command_app)\n\n bound, unused_tokens = create_bound_arguments(resolved_command, unused_tokens)\n try:\n if command_app.converter:\n bound.arguments = command_app.converter(**bound.arguments)\n for command_group in command_groups:\n if command_group.converter:\n bound.arguments = command_group.converter(**bound.arguments)\n for validator in command_app.validator:\n validator(**bound.arguments)\n for command_group in command_groups:\n for validator in command_group.validator:\n validator(**bound.arguments)\n except (AssertionError, ValueError, TypeError) as e:\n new_exception = ValidationError(value=e.args[0])\n raise new_exception from e\n\n return command, bound, unused_tokens\n else:\n if unused_tokens:\n raise InvalidCommandError(unused_tokens=unused_tokens)\n else:\n # Running the application with no arguments and no registered\n # ``default_command`` will default to ``help_print``.\n command = self.help_print\n bound = inspect.signature(command).bind(tokens=tokens, console=console)\n return command, bound, []\n except CycloptsError as e:\n e.app = command_app\n if command_chain:\n e.command_chain = command_chain\n raise\n\n raise 
NotImplementedError(\"Should never get here.\")\n\n def parse_args(\n self,\n tokens: Union[None, str, Iterable[str]] = None,\n *,\n console: Optional[Console] = None,\n print_error: bool = True,\n exit_on_error: bool = True,\n verbose: bool = False,\n ) -> Tuple[Callable, inspect.BoundArguments]:\n \"\"\"Interpret arguments into a function and :class:`~inspect.BoundArguments`.\n\n **Does** handle special flags like \"version\" or \"help\".\n\n Raises\n ------\n UnusedCliTokensError\n If any tokens remain after parsing.\n\n Parameters\n ----------\n tokens: Union[None, str, Iterable[str]]\n Either a string, or a list of strings to launch a command.\n Defaults to ``sys.argv[1:]``.\n print_error: bool\n Print a rich-formatted error on error.\n Defaults to ``True``.\n exit_on_error: bool\n If there is an error parsing the CLI tokens invoke ``sys.exit(1)``.\n Otherwise, continue to raise the exception.\n Defaults to ``True``.\n verbose: bool\n Populate exception strings with more information intended for developers.\n Defaults to ``False``.\n\n Returns\n -------\n command: Callable\n Function associated with command action.\n\n bound: inspect.BoundArguments\n Parsed and converted ``args`` and ``kwargs`` to be used when calling ``command``.\n \"\"\"\n tokens = normalize_tokens(tokens)\n\n meta_parent = self\n\n try:\n # Special flags (help/version) get bubbled up to the root app.\n # The root ``help_print`` will then traverse the meta app linked list.\n\n # The Help Flag is allowed to be anywhere in the token stream.\n help_flag_index = None\n for help_flag in self.help_flags:\n try:\n help_flag_index = tokens.index(help_flag)\n break\n except ValueError:\n pass\n\n if help_flag_index is not None:\n tokens.pop(help_flag_index)\n command = self.help_print\n while meta_parent := meta_parent._meta_parent:\n command = meta_parent.help_print\n bound = inspect.signature(command).bind(tokens, console=console)\n unused_tokens = []\n elif any(flag in tokens for flag in self.version_flags):\n # Version\n command = self.version_print\n while meta_parent := meta_parent._meta_parent:\n command = meta_parent.version_print\n bound = inspect.signature(command).bind()\n unused_tokens = []\n else:\n # Normal parsing\n command, bound, unused_tokens = self.parse_known_args(tokens, console=console)\n if unused_tokens:\n raise UnusedCliTokensError(\n target=command,\n unused_tokens=unused_tokens,\n )\n except CycloptsError as e:\n e.verbose = verbose\n e.root_input_tokens = tokens\n if print_error:\n if console is None:\n console = Console()\n console.print(format_cyclopts_error(e))\n\n if exit_on_error:\n sys.exit(1)\n else:\n raise\n\n return command, bound\n\n def __call__(\n self,\n tokens: Union[None, str, Iterable[str]] = None,\n *,\n console: Optional[Console] = None,\n print_error: bool = True,\n exit_on_error: bool = True,\n verbose: bool = False,\n ):\n \"\"\"Interprets and executes a command.\n\n Parameters\n ----------\n tokens : Union[None, str, Iterable[str]]\n Either a string, or a list of strings to launch a command.\n Defaults to ``sys.argv[1:]``.\n print_error: bool\n Print a rich-formatted error on error.\n Defaults to ``True``.\n exit_on_error: bool\n If there is an error parsing the CLI tokens invoke ``sys.exit(1)``.\n Otherwise, continue to raise the exception.\n Defaults to ``True``.\n verbose: bool\n Populate exception strings with more information intended for developers.\n Defaults to ``False``.\n\n Returns\n -------\n return_value: Any\n The value the parsed command handler returns.\n 
\"\"\"\n tokens = normalize_tokens(tokens)\n command, bound = self.parse_args(\n tokens,\n console=console,\n print_error=print_error,\n exit_on_error=exit_on_error,\n verbose=verbose,\n )\n try:\n return command(*bound.args, **bound.kwargs)\n except Exception as e:\n if PydanticValidationError is not None and isinstance(e, PydanticValidationError):\n if print_error:\n if console is None:\n console = Console()\n console.print(format_cyclopts_error(e))\n\n if exit_on_error:\n sys.exit(1)\n raise\n\n def help_print(\n self,\n tokens: Union[None, str, Iterable[str]] = None,\n *,\n console: Optional[Console] = None,\n ) -> None:\n \"\"\"Print the help page.\n\n Parameters\n ----------\n tokens: Union[None, str, Iterable[str]]\n Tokens to interpret for traversing the application command structure.\n If not provided, defaults to ``sys.argv``.\n \"\"\"\n tokens = normalize_tokens(tokens)\n\n if console is None:\n console = Console()\n\n command_chain, apps, _ = self._parse_command_chain(tokens)\n executing_app = apps[-1]\n\n # Print the:\n # my-app command COMMAND [ARGS] [OPTIONS]\n if executing_app.usage is None:\n console.print(format_usage(self, command_chain))\n elif executing_app.usage: # i.e. skip empty-string.\n console.print(executing_app.usage + \"\\n\")\n\n # Print the App/Command's Doc String.\n console.print(format_doc(self, executing_app))\n\n def walk_apps():\n # Iterates from deepest to shallowest meta-apps\n meta_list = [] # shallowest to deepest\n meta_list.append(executing_app)\n meta = executing_app\n while (meta := meta._meta) and meta.default_command:\n meta_list.append(meta)\n yield from reversed(meta_list)\n\n panels: Dict[str, Tuple[Group, HelpPanel]] = {}\n # Handle commands first; there's an off chance they may be \"upgraded\"\n # to an argument/parameter panel.\n for subapp in walk_apps():\n # Handle Commands\n for group, elements in groups_from_app(subapp):\n if not group.show:\n continue\n\n try:\n _, command_panel = panels[group.name]\n except KeyError:\n command_panel = HelpPanel(\n format=\"command\",\n title=group.name,\n )\n panels[group.name] = (group, command_panel)\n\n if group.help:\n if command_panel.description:\n command_panel.description += \"\\n\" + group.help\n else:\n command_panel.description = group.help\n\n command_panel.entries.extend(format_command_entries(elements))\n\n # Handle Arguments/Parameters\n for subapp in walk_apps():\n if subapp.default_command:\n command = ResolvedCommand(\n subapp.default_command,\n subapp.default_parameter,\n subapp.group_arguments,\n subapp.group_parameters,\n )\n for group, iparams in command.groups_iparams:\n if not group.show:\n continue\n cparams = [command.iparam_to_cparam[x] for x in iparams]\n try:\n _, existing_panel = panels[group.name]\n except KeyError:\n existing_panel = None\n new_panel = create_parameter_help_panel(group, iparams, cparams)\n\n if existing_panel:\n # An imperfect merging process\n existing_panel.format = \"parameter\"\n existing_panel.entries = new_panel.entries + existing_panel.entries # Commands go last\n if new_panel.description:\n if existing_panel.description:\n existing_panel.description += \"\\n\" + new_panel.description\n else:\n existing_panel.description = new_panel.description\n else:\n panels[group.name] = (group, new_panel)\n\n groups = [x[0] for x in panels.values()]\n help_panels = [x[1] for x in panels.values()]\n\n for help_panel in sort_groups(groups, help_panels)[1]:\n help_panel.remove_duplicates()\n if help_panel.format == \"command\":\n # don't sort format == 
\"parameter\" because order may matter there!\n help_panel.sort()\n console.print(help_panel)\n\n def interactive_shell(\n self,\n prompt: str = \"$ \",\n quit: Union[None, str, Iterable[str]] = None,\n dispatcher: Optional[Dispatcher] = None,\n **kwargs,\n ) -> None:\n \"\"\"Create a blocking, interactive shell.\n\n All registered commands can be executed in the shell.\n\n Parameters\n ----------\n prompt: str\n Shell prompt. Defaults to ``\"$ \"``.\n quit: Union[str, Iterable[str]]\n String or list of strings that will cause the shell to exit and this method to return.\n Defaults to ``[\"q\", \"quit\"]``.\n dispatcher: Optional[Dispatcher]\n Optional function that subsequently invokes the command.\n The ``dispatcher`` function must have signature:\n\n .. code-block:: python\n\n def dispatcher(command: Callable, bound: inspect.BoundArguments) -> Any:\n return command(*bound.args, **bound.kwargs)\n\n The above is the default dispatcher implementation.\n `**kwargs`\n Get passed along to :meth:`parse_args`.\n \"\"\"\n if os.name == \"posix\":\n print(\"Interactive shell. Press Ctrl-D to exit.\")\n else: # Windows\n print(\"Interactive shell. Press Ctrl-Z followed by Enter to exit.\")\n\n if quit is None:\n quit = [\"q\", \"quit\"]\n if isinstance(quit, str):\n quit = [quit]\n\n def default_dispatcher(command, bound):\n return command(*bound.args, **bound.kwargs)\n\n if dispatcher is None:\n dispatcher = default_dispatcher\n\n kwargs.setdefault(\"exit_on_error\", False)\n\n while True:\n try:\n user_input = input(prompt)\n except EOFError:\n break\n\n tokens = normalize_tokens(user_input)\n if not tokens:\n continue\n if tokens[0] in quit:\n break\n\n try:\n command, bound = self.parse_args(tokens, **kwargs)\n dispatcher(command, bound)\n except CycloptsError:\n # Upstream ``parse_args`` already printed the error\n pass\n except Exception:\n print(traceback.format_exc())\n\n def __repr__(self):\n \"\"\"Only shows non-default values.\"\"\"\n non_defaults = {}\n for a in self.__attrs_attrs__: # pyright: ignore[reportGeneralTypeIssues]\n if not a.init:\n continue\n v = getattr(self, a.name)\n # Compare types first because of some weird attribute issues.\n if type(v) != type(a.default) or v != a.default: # noqa: E721\n non_defaults[a.alias] = v\n\n signature = \", \".join(f\"{k}={v!r}\" for k, v in non_defaults.items())\n return f\"{type(self).__name__}({signature})\"" }, { "identifier": "Group", "path": "cyclopts/group.py", "snippet": "class Group:\n name: str = \"\"\n\n help: str = \"\"\n\n # All below parameters are keyword-only\n _show: Optional[bool] = field(default=None, alias=\"show\", kw_only=True)\n\n _sort_key: Any = field(\n default=None,\n alias=\"sort_key\",\n converter=lambda x: NO_USER_SORT_KEY if x is None else x,\n )\n\n converter: Optional[Callable] = field(default=None, kw_only=True)\n\n validator: Tuple[Callable, ...] 
= field(\n default=None,\n converter=lambda x: cast(Tuple[Callable, ...], to_tuple_converter(x)),\n kw_only=True,\n )\n\n default_parameter: Optional[\"Parameter\"] = field(\n default=None,\n validator=_group_default_parameter_must_be_none,\n kw_only=True,\n )\n\n def __str__(self):\n return self.name\n\n @property\n def show(self):\n return bool(self.name) if self._show is None else self._show\n\n @show.setter\n def show(self, value):\n self._show = value\n\n @property\n def sort_key(self):\n return None if self._sort_key is NO_USER_SORT_KEY else self._sort_key\n\n @sort_key.setter\n def sort_key(self, value):\n self._sort_key = value\n\n @classmethod\n def create_default_arguments(cls):\n return cls(\"Arguments\")\n\n @classmethod\n def create_default_parameters(cls):\n return cls(\"Parameters\")\n\n @classmethod\n def create_default_commands(cls):\n return cls(\"Commands\")\n\n @classmethod\n def create_ordered(cls, *args, sort_key=None, **kwargs):\n \"\"\"Create a group with a globally incremented :attr:`~Group.sort_key`.\n\n Used to create a group that will be displayed **after** a previously declared :meth:`Group.create_ordered` group on the help-page.\n\n If a :attr:`~Group.sort_key` is provided, it is **prepended** to the globally incremented counter value (i.e. has priority during sorting).\n \"\"\"\n count = next(_sort_key_counter)\n if sort_key is None:\n sort_key = (NO_USER_SORT_KEY, count)\n elif is_iterable(sort_key):\n sort_key = (tuple(sort_key), count)\n else:\n sort_key = (sort_key, count)\n return cls(*args, sort_key=sort_key, **kwargs)" }, { "identifier": "Parameter", "path": "cyclopts/parameter.py", "snippet": "class Parameter:\n \"\"\"Cyclopts configuration for individual function parameters.\"\"\"\n\n # All documentation has been moved to ``docs/api.rst`` for greater control with attrs.\n\n name: Tuple[str, ...] = field(\n default=None,\n converter=lambda x: cast(Tuple[str, ...], to_tuple_converter(x)),\n )\n\n converter: Callable = field(default=None, converter=attrs.converters.default_if_none(convert))\n\n validator: Tuple[Callable, ...] = field(\n default=(),\n converter=lambda x: cast(Tuple[Callable, ...], to_tuple_converter(x)),\n )\n\n negative: Union[None, Tuple[str, ...]] = field(default=None, converter=optional_to_tuple_converter)\n\n group: Tuple[Union[Group, str], ...] = field(default=None, converter=to_tuple_converter, hash=False)\n\n parse: bool = field(default=None, converter=attrs.converters.default_if_none(True))\n\n _show: Optional[bool] = field(default=None, alias=\"show\")\n\n show_default: Optional[bool] = field(default=None)\n\n show_choices: bool = field(default=None, converter=attrs.converters.default_if_none(True))\n\n help: Optional[str] = field(default=None)\n\n show_env_var: bool = field(default=None, converter=attrs.converters.default_if_none(True))\n\n env_var: Tuple[str, ...] = field(\n default=None,\n converter=lambda x: cast(Tuple[str, ...], to_tuple_converter(x)),\n )\n\n negative_bool: Tuple[str, ...] = field(\n default=None,\n converter=_negative_converter((\"--no-\",)),\n validator=_double_hyphen_validator,\n )\n\n negative_iterable: Tuple[str, ...] 
= field(\n default=None,\n converter=_negative_converter((\"--empty-\",)),\n validator=_double_hyphen_validator,\n )\n\n required: Optional[bool] = field(default=None)\n\n allow_leading_hyphen: bool = field(default=False)\n\n # Populated by the record_attrs_init_args decorator.\n _provided_args: Tuple[str] = field(default=(), init=False, eq=False)\n\n @property\n def show(self):\n return self._show if self._show is not None else self.parse\n\n def get_negatives(self, type_, *names: str) -> Tuple[str, ...]:\n type_ = get_origin(type_) or type_\n\n if self.negative is not None:\n return self.negative\n elif type_ not in (bool, list, set):\n return ()\n\n out = []\n for name in names:\n if name.startswith(\"--\"):\n name = name[2:]\n elif name.startswith(\"-\"):\n # Do not support automatic negation for short flags.\n continue\n else:\n # Should never reach here.\n raise NotImplementedError(\"All parameters should have started with '-' or '--'.\")\n\n negative_prefixes = self.negative_bool if type_ is bool else self.negative_iterable\n\n for negative_prefix in negative_prefixes:\n out.append(f\"{negative_prefix}{name}\")\n return tuple(out)\n\n def __repr__(self):\n \"\"\"Only shows non-default values.\"\"\"\n content = \", \".join(\n [\n f\"{a.alias}={getattr(self, a.name)!r}\"\n for a in self.__attrs_attrs__ # pyright: ignore[reportGeneralTypeIssues]\n if a.alias in self._provided_args\n ]\n )\n return f\"{type(self).__name__}({content})\"\n\n @classmethod\n def combine(cls, *parameters: Optional[\"Parameter\"]) -> \"Parameter\":\n \"\"\"Returns a new Parameter with values of ``parameters``.\n\n Parameters\n ----------\n `*parameters`: Optional[Parameter]\n Parameters who's attributes override ``self`` attributes.\n Ordered from least-to-highest attribute priority.\n \"\"\"\n kwargs = {}\n for parameter in parameters:\n if parameter is None:\n continue\n for a in parameter.__attrs_attrs__: # pyright: ignore[reportGeneralTypeIssues]\n if a.init and a.alias in parameter._provided_args:\n kwargs[a.alias] = getattr(parameter, a.name)\n\n return cls(**kwargs)\n\n @classmethod\n def default(cls) -> \"Parameter\":\n \"\"\"Create a Parameter with all Cyclopts-default values.\n\n This is different than just :class:`Parameter` because the default\n values will be recorded and override all upstream parameter values.\n \"\"\"\n return cls(\n **{a.alias: a.default for a in cls.__attrs_attrs__ if a.init} # pyright: ignore[reportGeneralTypeIssues]\n )" }, { "identifier": "HelpEntry", "path": "cyclopts/help.py", "snippet": "class HelpEntry:\n name: str\n short: str = \"\"\n description: str = \"\"\n required: bool = False" }, { "identifier": "HelpPanel", "path": "cyclopts/help.py", "snippet": "class HelpPanel:\n format: Literal[\"command\", \"parameter\"]\n title: str\n description: str = \"\"\n entries: List[HelpEntry] = field(factory=list)\n\n def remove_duplicates(self):\n seen, out = set(), []\n for item in self.entries:\n if item not in seen:\n seen.add(item)\n out.append(item)\n self.entries = out\n\n def sort(self):\n self.entries.sort(key=lambda x: (x.name.startswith(\"-\"), x.name))\n\n def __rich__(self):\n if not self.entries:\n return _silent\n table = Table.grid(padding=(0, 1))\n text = Text(end=\"\")\n if self.description:\n text.append(self.description + \"\\n\\n\")\n panel = Panel(\n console.Group(text, table),\n box=box.ROUNDED,\n expand=True,\n title_align=\"left\",\n title=self.title,\n )\n\n if self.format == \"command\":\n table.add_column(justify=\"left\", style=\"cyan\")\n 
table.add_column(justify=\"left\")\n\n for entry in self.entries:\n name = entry.name\n if entry.short:\n name += \",\" + entry.short\n table.add_row(name + \" \", entry.description)\n elif self.format == \"parameter\":\n has_short = any(entry.short for entry in self.entries)\n has_required = any(entry.required for entry in self.entries)\n\n if has_required:\n table.add_column(justify=\"left\", width=1, style=\"red bold\") # For asterisk\n table.add_column(justify=\"left\", no_wrap=True, style=\"cyan\") # For option names\n if has_short:\n table.add_column(justify=\"left\", no_wrap=True, style=\"green\") # For short options\n table.add_column(justify=\"left\") # For main help text.\n\n for entry in self.entries:\n row = []\n if has_required:\n if entry.required:\n row.append(\"*\")\n else:\n row.append(\"\")\n row.append(entry.name + \" \")\n if has_short:\n row.append(entry.short + \" \")\n row.append(entry.description)\n table.add_row(*row)\n else:\n raise NotImplementedError\n\n return panel" }, { "identifier": "create_parameter_help_panel", "path": "cyclopts/help.py", "snippet": "def create_parameter_help_panel(group: \"Group\", iparams, cparams: List[Parameter]) -> HelpPanel:\n icparams = [(ip, cp) for ip, cp in zip(iparams, cparams) if cp.show]\n iparams, cparams = (list(x) for x in zip(*icparams))\n\n help_panel = HelpPanel(format=\"parameter\", title=group.name, description=group.help)\n\n for iparam, cparam in icparams:\n assert cparam.name is not None\n type_ = get_hint_parameter(iparam)[0]\n options = list(cparam.name)\n options.extend(cparam.get_negatives(type_, *options))\n\n # Add an all-uppercase name if it's an argument\n if iparam.kind in (iparam.POSITIONAL_ONLY, iparam.POSITIONAL_OR_KEYWORD):\n arg_name = options[0].lstrip(\"-\").upper()\n if arg_name != options[0]:\n options = [arg_name, *options]\n\n short_options, long_options = [], []\n for option in options:\n if _is_short(option):\n short_options.append(option)\n else:\n long_options.append(option)\n\n help_components = []\n\n if cparam.help:\n help_components.append(cparam.help)\n\n if cparam.show_choices:\n choices = _get_choices(type_)\n if choices:\n help_components.append(rf\"[dim]\\[choices: {choices}][/dim]\")\n\n if cparam.show_env_var and cparam.env_var:\n env_vars = \" \".join(cparam.env_var)\n help_components.append(rf\"[dim]\\[env var: {env_vars}][/dim]\")\n\n if not cparam.required and (\n cparam.show_default or (cparam.show_default is None and iparam.default is not None)\n ):\n default = \"\"\n if isclass(type_) and issubclass(type_, Enum):\n default = iparam.default.name.lower().replace(\"_\", \"-\")\n else:\n default = iparam.default\n\n help_components.append(rf\"[dim]\\[default: {default}][/dim]\")\n\n if cparam.required:\n help_components.append(r\"[red][dim]\\[required][/dim][/red]\")\n\n # populate row\n help_panel.entries.append(\n HelpEntry(\n name=\",\".join(long_options),\n description=\" \".join(help_components),\n short=\",\".join(short_options),\n required=bool(cparam.required),\n )\n )\n\n return help_panel" }, { "identifier": "format_command_entries", "path": "cyclopts/help.py", "snippet": "def format_command_entries(elements) -> List:\n entries = []\n for element in elements:\n short_names, long_names = [], []\n for name in element.name:\n short_names.append(name) if _is_short(name) else long_names.append(name)\n entry = HelpEntry(\n name=\",\".join(long_names),\n short=\",\".join(short_names),\n description=docstring_parse(element.help).short_description or \"\",\n )\n if entry not in 
entries:\n entries.append(entry)\n return entries" }, { "identifier": "format_doc", "path": "cyclopts/help.py", "snippet": "def format_doc(root_app, app: \"App\"):\n from cyclopts.core import App # noqa: F811\n\n raw_doc_string = app.help\n\n if not raw_doc_string:\n return _silent\n\n parsed = docstring_parse(raw_doc_string)\n\n components: List[Tuple[str, str]] = []\n if parsed.short_description:\n components.append((parsed.short_description + \"\\n\", \"default\"))\n\n if parsed.long_description:\n components.append((\"\\n\" + parsed.long_description + \"\\n\", \"info\"))\n\n return Text.assemble(*components)" }, { "identifier": "format_usage", "path": "cyclopts/help.py", "snippet": "def format_usage(\n app,\n command_chain: List[str],\n):\n usage = []\n usage.append(\"Usage:\")\n usage.append(app.name[0])\n usage.extend(command_chain)\n\n for command in command_chain:\n app = app[command]\n\n if app._commands:\n usage.append(\"COMMAND\")\n\n if app.default_command:\n to_show = set()\n for parameter in inspect.signature(app.default_command).parameters.values():\n if parameter.kind in (parameter.POSITIONAL_ONLY, parameter.VAR_POSITIONAL, parameter.POSITIONAL_OR_KEYWORD):\n to_show.add(\"[ARGS]\")\n if parameter.kind in (parameter.KEYWORD_ONLY, parameter.VAR_KEYWORD, parameter.POSITIONAL_OR_KEYWORD):\n to_show.add(\"[OPTIONS]\")\n usage.extend(sorted(to_show))\n\n return Text(\" \".join(usage) + \"\\n\", style=\"bold\")" }, { "identifier": "ResolvedCommand", "path": "cyclopts/resolve.py", "snippet": "class ResolvedCommand:\n command: Callable\n groups: List[Group]\n groups_iparams: List[Tuple[Group, List[inspect.Parameter]]]\n iparam_to_groups: ParameterDict\n iparam_to_cparam: ParameterDict\n name_to_iparam: Dict[str, inspect.Parameter]\n\n def __init__(\n self,\n f,\n app_parameter: Optional[Parameter] = None,\n group_arguments: Optional[Group] = None,\n group_parameters: Optional[Group] = None,\n parse_docstring: bool = True,\n ):\n \"\"\"\n ``app_parameter`` implicitly has the command-group parameter already resolved.\n\n Parameters\n ----------\n f: Callable\n Function to resolve annotated :class:`Parameters`.\n app_parameter:\n Default :class:`Parameter` to inherit configuration from.\n group_arguments: Optional[Group]\n Default :class:`Group` for positional-only arguments.\n group_parameters: Optional[Group]\n Default :class:`Group` for non-positional-only arguments.\n parse_docstring: bool\n Parse the docstring to populate Parameter ``help``, if not explicitly set.\n Disable for improved performance if ``help`` won't be used in the resulting :class:`Parameter`.\n \"\"\"\n if group_arguments is None:\n group_arguments = Group.create_default_arguments()\n if group_parameters is None:\n group_parameters = Group.create_default_parameters()\n\n self.command = f\n signature = inspect.signature(f)\n self.name_to_iparam = cast(Dict[str, inspect.Parameter], signature.parameters)\n\n # Get:\n # 1. Fully resolved and created Groups.\n # 2. 
A mapping of inspect.Parameter to those Group objects.\n self.groups, self.iparam_to_groups = _resolve_groups(f, app_parameter, group_arguments, group_parameters)\n\n # Fully Resolve each Cyclopts Parameter\n self.iparam_to_cparam = ParameterDict()\n iparam_to_docstring_cparam = _resolve_docstring(f) if parse_docstring else ParameterDict()\n for iparam, groups in self.iparam_to_groups.items():\n if iparam.kind in (iparam.POSITIONAL_ONLY, iparam.VAR_POSITIONAL):\n # Name is only used for help-string\n names = [iparam.name.upper()]\n else:\n names = [\"--\" + iparam.name.replace(\"_\", \"-\")]\n\n default_name_parameter = Parameter(name=names)\n\n cparam = get_hint_parameter(\n iparam,\n app_parameter,\n *(x.default_parameter for x in groups),\n iparam_to_docstring_cparam.get(iparam),\n default_name_parameter,\n Parameter(required=iparam.default is iparam.empty),\n )[1]\n self.iparam_to_cparam[iparam] = cparam\n\n self.bind = signature.bind_partial if _has_unparsed_parameters(f, app_parameter) else signature.bind\n\n # Create a convenient group-to-iparam structure\n self.groups_iparams = [\n (\n group,\n [iparam for iparam, groups in self.iparam_to_groups.items() if group in groups],\n )\n for group in self.groups\n ]" } ]
import inspect import sys import attrs import pytest from enum import Enum from textwrap import dedent from typing import List, Literal, Optional, Union from typing_extensions import Annotated from typing import Annotated from cyclopts import App, Group, Parameter from cyclopts.help import ( HelpEntry, HelpPanel, create_parameter_help_panel, format_command_entries, format_doc, format_usage, ) from cyclopts.resolve import ResolvedCommand
12182
def cmd( foo: Annotated[CompSciProblem, Parameter(help="Docstring for foo.")] = CompSciProblem.fizz, bar: Annotated[CompSciProblem, Parameter(help="Docstring for bar.")] = CompSciProblem.buzz, ): pass actual = capture_format_group_parameters(cmd) expected = dedent( """\ ╭─ Parameters ───────────────────────────────────────────────────────╮ │ FOO,--foo Docstring for foo. [choices: fizz,buzz] [default: fizz] │ │ BAR,--bar Docstring for bar. [choices: fizz,buzz] [default: buzz] │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_format_group_parameters_env_var(capture_format_group_parameters): def cmd( foo: Annotated[int, Parameter(env_var=["FOO", "BAR"], help="Docstring for foo.")] = 123, ): pass actual = capture_format_group_parameters(cmd) expected = dedent( """\ ╭─ Parameters ───────────────────────────────────────────────────────╮ │ FOO,--foo Docstring for foo. [env var: FOO BAR] [default: 123] │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_print_function(app, console): @app.command(help="Cmd help string.") def cmd( foo: Annotated[str, Parameter(help="Docstring for foo.")], *, bar: Annotated[str, Parameter(help="Docstring for bar.")], ): pass with console.capture() as capture: app.help_print(["cmd"], console=console) actual = capture.get() expected = dedent( """\ Usage: app cmd [ARGS] [OPTIONS] Cmd help string. ╭─ Parameters ───────────────────────────────────────────────────────╮ │ * FOO,--foo Docstring for foo. [required] │ │ * --bar Docstring for bar. [required] │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_print_function_defaults(app, console): @app.command(help="Cmd help string.") def cmd( *tokens: Annotated[str, Parameter(show=False, allow_leading_hyphen=True)], bar: Annotated[str, Parameter(help="Docstring for bar.")] = "bar-value", baz: Annotated[str, Parameter(help="Docstring for bar.", env_var="BAZ")] = "baz-value", ): pass with console.capture() as capture: app.help_print(["cmd"], console=console) actual = capture.get() expected = dedent( """\ Usage: app cmd [ARGS] [OPTIONS] Cmd help string. ╭─ Parameters ───────────────────────────────────────────────────────╮ │ --bar Docstring for bar. [default: bar-value] │ │ --baz Docstring for bar. [env var: BAZ] [default: baz-value] │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_print_function_no_parse(app, console): @app.command(help="Cmd help string.") def cmd( foo: Annotated[str, Parameter(help="Docstring for foo.")], *, bar: Annotated[str, Parameter(parse=False)], ): pass with console.capture() as capture: app.help_print(["cmd"], console=console) actual = capture.get() expected = dedent( """\ Usage: app cmd [ARGS] [OPTIONS] Cmd help string. ╭─ Parameters ───────────────────────────────────────────────────────╮ │ * FOO,--foo Docstring for foo. [required] │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_print_parameter_group_description(app, console):
if sys.version_info < (3, 9): else: @pytest.fixture def app(): return App( name="app", help="App Help String Line 1.", ) def test_empty_help_panel_rich_silent(console): help_panel = HelpPanel(format="command", title="test") with console.capture() as capture: console.print(help_panel) actual = capture.get() assert actual == "" def test_help_default_action(app, console): """No command should default to help.""" with console.capture() as capture: app([], console=console) actual = capture.get() expected = dedent( """\ Usage: app COMMAND App Help String Line 1. ╭─ Commands ─────────────────────────────────────────────────────────╮ │ --help,-h Display this message and exit. │ │ --version Display application version. │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_custom_usage(app, console): app.usage = "My custom usage." with console.capture() as capture: app([], console=console) actual = capture.get() expected = dedent( """\ My custom usage. App Help String Line 1. ╭─ Commands ─────────────────────────────────────────────────────────╮ │ --help,-h Display this message and exit. │ │ --version Display application version. │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_custom_usage_subapp(app, console): app.command(App(name="foo", usage="My custom usage.")) with console.capture() as capture: app(["foo", "--help"], console=console) actual = capture.get() expected = dedent( """\ My custom usage. ╭─ Commands ─────────────────────────────────────────────────────────╮ │ --help,-h Display this message and exit. │ │ --version Display application version. │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_default_help_flags(console): """Standard help flags.""" app = App(name="app", help="App Help String Line 1.") with console.capture() as capture: app(["--help"], console=console) actual = capture.get() expected = dedent( """\ Usage: app COMMAND App Help String Line 1. ╭─ Commands ─────────────────────────────────────────────────────────╮ │ --help,-h Display this message and exit. │ │ --version Display application version. │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_format_usage_empty(console): app = App( name="app", help="App Help String Line 1.", help_flags=[], version_flags=[], ) with console.capture() as capture: console.print(format_usage(app, [])) actual = capture.get() assert actual == "Usage: app\n\n" def test_help_format_usage_command(app, console): @app.command def foo(): pass with console.capture() as capture: console.print(format_usage(app, [])) actual = capture.get() assert actual == "Usage: app COMMAND\n\n" def test_format_commands_docstring(app, console): @app.command def foo(): """Docstring for foo. This should not be shown. """ pass panel = HelpPanel(title="Commands", format="command") panel.entries.extend(format_command_entries((app["foo"],))) with console.capture() as capture: console.print(panel) actual = capture.get() assert actual == ( "╭─ Commands ─────────────────────────────────────────────────────────╮\n" "│ foo Docstring for foo. 
│\n" "╰────────────────────────────────────────────────────────────────────╯\n" ) def test_format_commands_no_show(app, console): @app.command def foo(): """Docstring for foo.""" pass @app.command(show=False) def bar(): """Should not be shown.""" pass panel = HelpPanel(title="Commands", format="command") panel.entries.extend(format_command_entries((app,))) with console.capture() as capture: app.help_print([], console=console) actual = capture.get() expected = dedent( """\ Usage: app COMMAND App Help String Line 1. ╭─ Commands ─────────────────────────────────────────────────────────╮ │ foo Docstring for foo. │ │ --help,-h Display this message and exit. │ │ --version Display application version. │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_format_commands_explicit_help(app, console): @app.command(help="Docstring for foo.") def foo(): """Should not be shown.""" pass panel = HelpPanel(title="Commands", format="command") panel.entries.extend(format_command_entries((app["foo"],))) with console.capture() as capture: console.print(panel) actual = capture.get() assert actual == ( "╭─ Commands ─────────────────────────────────────────────────────────╮\n" "│ foo Docstring for foo. │\n" "╰────────────────────────────────────────────────────────────────────╯\n" ) def test_format_commands_explicit_name(app, console): @app.command(name="bar") def foo(): """Docstring for bar. This should not be shown. """ pass panel = HelpPanel(title="Commands", format="command") panel.entries.extend(format_command_entries((app["bar"],))) with console.capture() as capture: console.print(panel) actual = capture.get() assert actual == ( "╭─ Commands ─────────────────────────────────────────────────────────╮\n" "│ bar Docstring for bar. │\n" "╰────────────────────────────────────────────────────────────────────╯\n" ) def test_help_empty(console): app = App(name="foo", version_flags=[], help_flags=[]) with console.capture() as capture: app.help_print(console=console) actual = capture.get() assert actual == "Usage: foo\n\n" @pytest.fixture def capture_format_group_parameters(console, default_function_groups): def inner(cmd): command = ResolvedCommand(cmd, *default_function_groups) with console.capture() as capture: group, iparams = command.groups_iparams[0] cparams = [command.iparam_to_cparam[x] for x in iparams] console.print(create_parameter_help_panel(group, iparams, cparams)) return capture.get() return inner def test_help_format_group_parameters(capture_format_group_parameters): def cmd( foo: Annotated[str, Parameter(help="Docstring for foo.")], bar: Annotated[str, Parameter(help="Docstring for bar.")], ): pass actual = capture_format_group_parameters(cmd) expected = dedent( """\ ╭─ Parameters ───────────────────────────────────────────────────────╮ │ * FOO,--foo Docstring for foo. [required] │ │ * BAR,--bar Docstring for bar. [required] │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_format_group_parameters_short_name(capture_format_group_parameters): def cmd( foo: Annotated[str, Parameter(name=["--foo", "-f"], help="Docstring for foo.")], ): pass actual = capture_format_group_parameters(cmd) expected = dedent( """\ ╭─ Parameters ───────────────────────────────────────────────────────╮ │ * FOO,--foo -f Docstring for foo. 
[required] │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_format_group_parameters_from_docstring(capture_format_group_parameters): def cmd(foo: str, bar: str): """ Parameters ---------- foo: str Docstring for foo. bar: str Docstring for bar. """ pass actual = capture_format_group_parameters(cmd) expected = dedent( """\ ╭─ Parameters ───────────────────────────────────────────────────────╮ │ * FOO,--foo Docstring for foo. [required] │ │ * BAR,--bar Docstring for bar. [required] │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_format_group_parameters_bool_flag(capture_format_group_parameters): def cmd( foo: Annotated[bool, Parameter(help="Docstring for foo.")] = True, ): pass actual = capture_format_group_parameters(cmd) expected = dedent( """\ ╭─ Parameters ───────────────────────────────────────────────────────╮ │ FOO,--foo,--no-foo Docstring for foo. [default: True] │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_format_group_parameters_bool_flag_custom_negative(capture_format_group_parameters): def cmd( foo: Annotated[bool, Parameter(negative="--yesnt-foo", help="Docstring for foo.")] = True, ): pass actual = capture_format_group_parameters(cmd) expected = dedent( """\ ╭─ Parameters ───────────────────────────────────────────────────────╮ │ FOO,--foo,--yesnt-foo Docstring for foo. [default: True] │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_format_group_parameters_list_flag(capture_format_group_parameters): def cmd( foo: Annotated[Optional[List[int]], Parameter(help="Docstring for foo.")] = None, ): pass actual = capture_format_group_parameters(cmd) expected = dedent( """\ ╭─ Parameters ───────────────────────────────────────────────────────╮ │ FOO,--foo,--empty-foo Docstring for foo. │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_format_group_parameters_defaults(capture_format_group_parameters): def cmd( foo: Annotated[str, Parameter(help="Docstring for foo.")] = "fizz", bar: Annotated[str, Parameter(help="Docstring for bar.")] = "buzz", ): pass actual = capture_format_group_parameters(cmd) expected = dedent( """\ ╭─ Parameters ───────────────────────────────────────────────────────╮ │ FOO,--foo Docstring for foo. [default: fizz] │ │ BAR,--bar Docstring for bar. [default: buzz] │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_format_group_parameters_defaults_no_show(capture_format_group_parameters): def cmd( foo: Annotated[str, Parameter(show_default=False, help="Docstring for foo.")] = "fizz", bar: Annotated[str, Parameter(help="Docstring for bar.")] = "buzz", ): pass actual = capture_format_group_parameters(cmd) expected = dedent( """\ ╭─ Parameters ───────────────────────────────────────────────────────╮ │ FOO,--foo Docstring for foo. │ │ BAR,--bar Docstring for bar. 
[default: buzz] │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_format_group_parameters_choices_literal_no_show(capture_format_group_parameters): def cmd( foo: Annotated[Literal["fizz", "buzz"], Parameter(show_choices=False, help="Docstring for foo.")] = "fizz", bar: Annotated[Literal["fizz", "buzz"], Parameter(help="Docstring for bar.")] = "buzz", ): pass actual = capture_format_group_parameters(cmd) expected = dedent( """\ ╭─ Parameters ───────────────────────────────────────────────────────╮ │ FOO,--foo Docstring for foo. [default: fizz] │ │ BAR,--bar Docstring for bar. [choices: fizz,buzz] [default: buzz] │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_format_group_parameters_choices_literal_union(capture_format_group_parameters): def cmd( foo: Annotated[ Union[int, Literal["fizz", "buzz"], Literal["bar"]], Parameter(help="Docstring for foo.") ] = "fizz", ): pass actual = capture_format_group_parameters(cmd) expected = dedent( """\ ╭─ Parameters ───────────────────────────────────────────────────────╮ │ FOO,--foo Docstring for foo. [choices: fizz,buzz,bar] [default: │ │ fizz] │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_format_group_parameters_choices_enum(capture_format_group_parameters): class CompSciProblem(Enum): fizz = "bleep bloop blop" buzz = "blop bleep bloop" def cmd( foo: Annotated[CompSciProblem, Parameter(help="Docstring for foo.")] = CompSciProblem.fizz, bar: Annotated[CompSciProblem, Parameter(help="Docstring for bar.")] = CompSciProblem.buzz, ): pass actual = capture_format_group_parameters(cmd) expected = dedent( """\ ╭─ Parameters ───────────────────────────────────────────────────────╮ │ FOO,--foo Docstring for foo. [choices: fizz,buzz] [default: fizz] │ │ BAR,--bar Docstring for bar. [choices: fizz,buzz] [default: buzz] │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_format_group_parameters_env_var(capture_format_group_parameters): def cmd( foo: Annotated[int, Parameter(env_var=["FOO", "BAR"], help="Docstring for foo.")] = 123, ): pass actual = capture_format_group_parameters(cmd) expected = dedent( """\ ╭─ Parameters ───────────────────────────────────────────────────────╮ │ FOO,--foo Docstring for foo. [env var: FOO BAR] [default: 123] │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_print_function(app, console): @app.command(help="Cmd help string.") def cmd( foo: Annotated[str, Parameter(help="Docstring for foo.")], *, bar: Annotated[str, Parameter(help="Docstring for bar.")], ): pass with console.capture() as capture: app.help_print(["cmd"], console=console) actual = capture.get() expected = dedent( """\ Usage: app cmd [ARGS] [OPTIONS] Cmd help string. ╭─ Parameters ───────────────────────────────────────────────────────╮ │ * FOO,--foo Docstring for foo. [required] │ │ * --bar Docstring for bar. 
[required] │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_print_function_defaults(app, console): @app.command(help="Cmd help string.") def cmd( *tokens: Annotated[str, Parameter(show=False, allow_leading_hyphen=True)], bar: Annotated[str, Parameter(help="Docstring for bar.")] = "bar-value", baz: Annotated[str, Parameter(help="Docstring for bar.", env_var="BAZ")] = "baz-value", ): pass with console.capture() as capture: app.help_print(["cmd"], console=console) actual = capture.get() expected = dedent( """\ Usage: app cmd [ARGS] [OPTIONS] Cmd help string. ╭─ Parameters ───────────────────────────────────────────────────────╮ │ --bar Docstring for bar. [default: bar-value] │ │ --baz Docstring for bar. [env var: BAZ] [default: baz-value] │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_print_function_no_parse(app, console): @app.command(help="Cmd help string.") def cmd( foo: Annotated[str, Parameter(help="Docstring for foo.")], *, bar: Annotated[str, Parameter(parse=False)], ): pass with console.capture() as capture: app.help_print(["cmd"], console=console) actual = capture.get() expected = dedent( """\ Usage: app cmd [ARGS] [OPTIONS] Cmd help string. ╭─ Parameters ───────────────────────────────────────────────────────╮ │ * FOO,--foo Docstring for foo. [required] │ ╰────────────────────────────────────────────────────────────────────╯ """ ) assert actual == expected def test_help_print_parameter_group_description(app, console):
@app.command(group_parameters=Group("Custom Title", help="Parameter description."))
1
2023-11-03 02:24:25+00:00
16k
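The sample above ends with its scalar fields (the gold next line, a gold_snippet_index of 1, the creation timestamp, and the 16k context-length level). To make the field layout easier to follow, here is a minimal, hypothetical consumer-side sketch in Python. It is not part of the dataset's own tooling; the helper names (build_prompt, exact_match) and the toy record literal are assumptions for illustration only. It shows how one such sample could be assembled into a completion prompt and how a predicted line could be scored against the gold next line:

# Hypothetical consumer-side sketch (assumed helper names; not dataset tooling).
# Builds a completion prompt from one record's retrieved context snippets plus
# its cropped file body, then checks a predicted line against the gold next_line.

def build_prompt(record: dict) -> str:
    # Each context item carries "identifier", "path", and "snippet" keys, as in
    # the records above; snippets are prepended to the cropped file body.
    context_blocks = [f"# {item['path']}\n{item['snippet']}" for item in record["context"]]
    return "\n\n".join(context_blocks + [record["import_statement"], record["cropped_code"]])


def exact_match(prediction: str, record: dict) -> bool:
    # The target is the single next source line of the cropped file.
    return prediction.strip() == record["next_line"].strip()


# Toy usage with heavily abbreviated field values from the record above:
record = {
    "context": [{"identifier": "Group", "path": "cyclopts/group.py", "snippet": "class Group: ..."}],
    "import_statement": "from cyclopts import App, Group, Parameter",
    "cropped_code": "def test_help_print_parameter_group_description(app, console):",
    "next_line": '@app.command(group_parameters=Group("Custom Title", help="Parameter description."))',
}
prediction = '@app.command(group_parameters=Group("Custom Title", help="Parameter description."))'
assert exact_match(prediction, record)

The next sample, for radekd91/inferno, follows below with the same field order (repo_name, file_path, context, and so on).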
radekd91/inferno
inferno/datasets/AfewVaDataModule.py
[ { "identifier": "load_segmentation", "path": "inferno/datasets/IO.py", "snippet": "def load_segmentation(filename):\n with open(filename, \"rb\") as f:\n seg = cpkl.load(f, compression='gzip')\n seg_type = seg[0]\n seg_image = seg[1]\n # seg_type = pkl.load(f)\n # seg_image = pkl.load(f)\n return seg_image, seg_type" }, { "identifier": "process_segmentation", "path": "inferno/datasets/IO.py", "snippet": "def process_segmentation(segmentation, seg_type, discarded_labels=None):\n if seg_type == \"face_parsing\":\n discarded_labels = discarded_labels or default_discarded_labels\n # start = timer()\n # segmentation_proc = np.ones_like(segmentation, dtype=np.float32)\n # for label in discarded_labels:\n # segmentation_proc[segmentation == label] = 0.\n segmentation_proc = np.isin(segmentation, discarded_labels)\n segmentation_proc = np.logical_not(segmentation_proc)\n segmentation_proc = segmentation_proc.astype(np.float32)\n # end = timer()\n # print(f\"Segmentation label discarding took {end - start}s\")\n return segmentation_proc\n elif seg_type == \"face_segmentation_focus\":\n segmentation = segmentation > 0.5 \n segmentation = segmentation.astype(np.float32)\n return segmentation\n else:\n raise ValueError(f\"Invalid segmentation type '{seg_type}'\")" }, { "identifier": "load_emotion", "path": "inferno/datasets/IO.py", "snippet": "def load_emotion(filename):\n with open(filename, \"rb\") as f:\n emo = cpkl.load(f, compression='gzip')\n version = emo[0]\n emotion_type = emo[1]\n emotion_features = emo[2]\n return emotion_features, emotion_type" }, { "identifier": "save_emotion", "path": "inferno/datasets/IO.py", "snippet": "def save_emotion(filename, emotion_features, emotion_type, version=0):\n with open(filename, \"wb\") as f:\n # for some reason compressed pickle can only load one object (EOF bug)\n # so put it in the list\n cpkl.dump([version, emotion_type, emotion_features], f, compression='gzip')" }, { "identifier": "numpy_image_to_torch", "path": "inferno/utils/image.py", "snippet": "def numpy_image_to_torch(img : np.ndarray) -> torch.Tensor:\n img = img.transpose([2, 0, 1])\n return torch.from_numpy(img)" }, { "identifier": "KeypointNormalization", "path": "inferno/transforms/keypoints.py", "snippet": "class KeypointNormalization(KeypointTransform):\n\n def __init__(self, scale_x=1., scale_y=1.):\n super().__init__(scale_x, scale_y)\n\n def forward(self, points):\n # normalization the way EMOCA uses it.\n # the keypoints are not used in image space but in normalized space\n # for loss computation\n # the normalization is as follows:\n if isinstance(points, torch.Tensor):\n points_ = points.clone()\n elif isinstance(points, np.ndarray):\n points_ = points.copy()\n else:\n raise ValueError(f\"Invalid type of points {str(type(points))}\")\n points_[..., 0] -= self.scale_x/2\n points_[..., 0] /= self.scale_x/2\n points_[..., 1] -= self.scale_y/2\n points_[..., 1] /= self.scale_y/2\n return points_\n\n def inv(self, points):\n if isinstance(points, torch.Tensor):\n points_ = points.clone()\n elif isinstance(points, np.ndarray):\n points_ = points.copy()\n else:\n raise ValueError(f\"Invalid type of points {str(type(points))}\")\n points_[..., 0] *= self.scale_x / 2\n points_[..., 0] += self.scale_x / 2\n points_[..., 1] *= self.scale_y / 2\n points_[..., 1] += self.scale_y / 2\n return points_" }, { "identifier": "FaceDataModuleBase", "path": "inferno/datasets/FaceDataModuleBase.py", "snippet": "class FaceDataModuleBase(pl.LightningDataModule):\n \"\"\"\n A base data module for face 
datasets. This DM can be inherited by any face datasets, which just adapt things \n to the dataset's specificities (such as different GT or data storage structure). \n This class can take care of face detection, recognition, segmentation and landmark detection.\n \"\"\"\n\n def __init__(self, root_dir, output_dir, processed_subfolder, device=None,\n face_detector='fan',\n face_detector_threshold=0.9,\n image_size=224,\n scale=1.25,\n bb_center_shift_x=0., # in relative numbers\n bb_center_shift_y=0., # in relative numbers (i.e. -0.1 for 10% shift upwards, ...)\n processed_ext=\".png\", \n save_detection_images=True, \n save_landmarks_frame_by_frame=True, # default\n save_landmarks_one_file=False, # only use for large scale video datasets (that would produce too many files otherwise)\n save_segmentation_frame_by_frame=True, # default\n save_segmentation_one_file=False, # only use for large scale video datasets (that would produce too many files otherwise)\n return_mica_images=False,\n ):\n super().__init__()\n self.root_dir = root_dir\n self.output_dir = output_dir\n self.bb_center_shift_x = bb_center_shift_x\n self.bb_center_shift_y = bb_center_shift_y\n self.processed_ext = processed_ext\n self.save_detection_images=save_detection_images\n self.save_landmarks_frame_by_frame = save_landmarks_frame_by_frame\n self.save_landmarks_one_file = save_landmarks_one_file\n assert not (save_landmarks_one_file and save_landmarks_frame_by_frame) # only one of them can be true\n self.save_segmentation_frame_by_frame = save_segmentation_frame_by_frame\n self.save_segmentation_one_file = save_segmentation_one_file\n assert not (save_segmentation_one_file and save_segmentation_frame_by_frame) # only one of them can be true\n\n if processed_subfolder is None:\n import datetime\n date = datetime.datetime.now()\n processed_folder = os.path.join(output_dir, \"processed_%s\" % date.strftime(\"%Y_%b_%d_%H-%M-%S\"))\n else:\n processed_folder = os.path.join(output_dir, processed_subfolder)\n self.output_dir = processed_folder\n\n self.device = device or torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n self.face_detector_type = face_detector\n self.face_detector_threshold = face_detector_threshold\n\n self.image_size = image_size\n self.scale = scale\n self.return_mica_images = return_mica_images\n\n def _get_max_faces_per_image(self): \n return 1\n \n def _is_video_dataset(self): \n return False\n\n # @profile\n def _instantiate_detector(self, overwrite = False, face_detector=None):\n face_detector = face_detector or self.face_detector_type\n if hasattr(self, 'face_detector'):\n if not overwrite:\n return\n del self.face_detector\n if self.face_detector_type == 'fan':\n self.face_detector = FAN(self.device, threshold=self.face_detector_threshold, mode='2D')\n elif self.face_detector_type == 'fan3d':\n self.face_detector = FAN(self.device, threshold=self.face_detector_threshold, mode='3D')\n elif self.face_detector_type == 'mtcnn':\n self.face_detector = MTCNN(self.device)\n elif self.face_detector_type == '3fabrec': \n from inferno.utils.TFabRecLandmarkDetector import TFabRec\n self.face_detector = TFabRec(instantiate_detector='sfd', threshold=self.face_detector_threshold)\n elif self.face_detector_type == 'mediapipe': \n from inferno.utils.MediaPipeLandmarkDetector import MediaPipeLandmarkDetector\n self.face_detector = MediaPipeLandmarkDetector(threshold=self.face_detector_threshold, \n video_based=self._is_video_dataset(), max_faces=self._get_max_faces_per_image())\n elif 
self.face_detector_type == 'deep3dface': \n from inferno.utils.Deep3DFaceLandmarkDetector import Deep3DFaceLandmarkDetector\n self.face_detector = Deep3DFaceLandmarkDetector(instantiate_detector='mtcnn')\n else:\n raise ValueError(\"Invalid face detector specifier '%s'\" % self.face_detector)\n\n # @profile\n def _detect_faces_in_image(self, image_or_path, detected_faces=None):\n # imagepath = self.imagepath_list[index]\n # imagename = imagepath.split('/')[-1].split('.')[0]\n if isinstance(image_or_path, (str, Path)):\n image = np.array(imread(image_or_path))\n elif isinstance(image_or_path, np.ndarray):\n image = image_or_path\n else: \n raise ValueError(\"Invalid image type '%s'\" % type(image_or_path)) \n \n if len(image.shape) == 2:\n image = np.tile(image[:, :, None], (1, 1, 3))\n if len(image.shape) == 3 and image.shape[2] > 3:\n image = image[:, :, :3]\n\n h, w, _ = image.shape\n self._instantiate_detector()\n bounding_boxes, bbox_type, landmarks = self.face_detector.run(image,\n with_landmarks=True,\n detected_faces=detected_faces)\n image = image / 255.\n detection_images = []\n detection_centers = []\n detection_sizes = []\n detection_landmarks = [] # landmarks wrt the detection image\n # original_landmarks = [] # landmarks wrt the original image\n original_landmarks = landmarks # landmarks wrt the original image\n # detection_embeddings = []\n if len(bounding_boxes) == 0:\n # print('no face detected! run original image')\n return detection_images, detection_centers, detection_images, \\\n bbox_type, detection_landmarks, original_landmarks\n # left = 0\n # right = h - 1\n # top = 0\n # bottom = w - 1\n # bounding_boxes += [[left, right, top, bottom]]\n\n for bi, bbox in enumerate(bounding_boxes):\n left = bbox[0]\n right = bbox[2]\n top = bbox[1]\n bottom = bbox[3]\n old_size, center = bbox2point(left, right, top, bottom, type=bbox_type)\n\n center[0] += abs(right-left)*self.bb_center_shift_x\n center[1] += abs(bottom-top)*self.bb_center_shift_y\n\n size = int(old_size * self.scale)\n\n dst_image, dts_landmark = bbpoint_warp(image, center, size, self.image_size, landmarks=landmarks[bi])\n\n # dst_image = dst_image.transpose(2, 0, 1)\n #\n detection_images += [(dst_image*255).astype(np.uint8)]\n detection_centers += [center]\n detection_sizes += [size]\n\n # imsave(os.path.join(\"detection_%d.png\" % bi), dst_image)\n\n # to be checked\n detection_landmarks += [dts_landmark]\n\n del image\n return detection_images, detection_centers, detection_sizes, bbox_type, detection_landmarks, original_landmarks\n\n # @profile\n def _detect_faces_in_image_wrapper(self, frame_list, fid, out_detection_folder, out_landmark_folder, bb_outfile,\n centers_all, sizes_all, detection_fnames_all, landmark_fnames_all, \n out_landmarks_all=None, out_landmarks_orig_all=None, out_bbox_type_all=None):\n\n if isinstance(frame_list, (str, Path, list)):\\\n # if frame list is a list of image paths\n frame_fname = frame_list[fid]\n # detect faces in each frames\n detection_ims, centers, sizes, bbox_type, landmarks, orig_landmarks = self._detect_faces_in_image(Path(self.output_dir) / frame_fname)\n elif isinstance(frame_list, (np.ndarray, types.GeneratorType)): \n # frame_list is an array of many images, or a generator (like a video reader)\n frame_fname =Path(f\"{fid:05d}.png\")\n if isinstance(frame_list, np.ndarray):\n frame = frame_list[fid]\n else: \n frame = next(frame_list)\n detection_ims, centers, sizes, bbox_type, landmarks, orig_landmarks = self._detect_faces_in_image(frame)\n # if len(detection_ims) 
> 0: # debug visualization\n # imsave(frame_fname, detection_ims[0])\n \n # self.detection_lists[sequence_id][fid] += [detections]\n # import plotly.graph_objects as go\n # fig = go.Figure(data=go.Image(z=frame,))\n # fig.show()\n\n \n centers_all += [centers]\n sizes_all += [sizes]\n if out_landmarks_all is not None:\n out_landmarks_all += [landmarks]\n if out_landmarks_orig_all is not None:\n out_landmarks_orig_all += [orig_landmarks]\n if out_bbox_type_all is not None:\n out_bbox_type_all += [[bbox_type]*len(landmarks)]\n\n # save detections\n detection_fnames = []\n landmark_fnames = []\n for di, detection in enumerate(detection_ims):\n # save detection\n stem = frame_fname.stem + \"_%.03d\" % di\n if self.save_detection_images:\n out_detection_fname = out_detection_folder / (stem + self.processed_ext)\n detection_fnames += [out_detection_fname.relative_to(self.output_dir)]\n if self.processed_ext in ['.JPG', '.jpg', \".jpeg\", \".JPEG\"]:\n imsave(out_detection_fname, detection, quality=100)\n else:\n imsave(out_detection_fname, detection)\n # save landmarks\n if self.save_landmarks_frame_by_frame:\n if self.save_detection_images:\n out_landmark_fname = out_landmark_folder / (stem + \".pkl\")\n landmark_fnames += [out_landmark_fname.relative_to(self.output_dir)]\n save_landmark(out_landmark_fname, landmarks[di], bbox_type)\n else: \n out_landmark_fname = out_landmark_folder / (stem + \".pkl\")\n landmark_fnames += [out_landmark_fname.relative_to(self.output_dir)]\n save_landmark(out_landmark_fname, orig_landmarks[di], bbox_type)\n\n detection_fnames_all += [detection_fnames]\n landmark_fnames_all += [landmark_fnames]\n\n torch.cuda.empty_cache()\n checkpoint_frequency = 100\n if fid % checkpoint_frequency == 0:\n FaceDataModuleBase.save_detections(bb_outfile, detection_fnames_all, landmark_fnames_all,\n centers_all, sizes_all, fid)\n\n\n def _get_segmentation_method(self): \n return \"focus\"\n # return \"bisenet\"\n\n\n def _segment_images(self, detection_fnames_or_ims, out_segmentation_folder, path_depth = 0, landmarks=None, segmentation_net=None):\n import time\n # segmentation_net = segmentation_net or \"bisenet\"\n segmentation_net = segmentation_net or self._get_segmentation_method()\n if self.save_landmarks_one_file: \n overwrite = False \n # single_out_file = out_segmentation_folder / \"segmentations.pkl\"\n single_out_file = out_segmentation_folder / \"segmentations.hdf5\"\n if single_out_file.is_file() and not overwrite:\n print(f\"Segmentation already found in {single_out_file}, skipping\")\n return\n\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n print(device)\n net, seg_type, batch_size = self._get_segmentation_net(device, segmentation_net)\n\n # if self.save_detection_images:\n # ref_im = imread(detection_fnames_or_ims[0])\n # else: \n # ref_im = detection_fnames_or_ims[0]\n # ref_size = Resize((ref_im.shape[0], ref_im.shape[1]), interpolation=Image.NEAREST)\n ref_size = None\n\n # transforms = Compose([\n # Resize((512, 512)),\n # Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),\n # ])\n transforms=None\n # batch_size = 16\n\n if isinstance(detection_fnames_or_ims, types.GeneratorType): \n im_read = \"skvreader\"\n elif isinstance(detection_fnames_or_ims, (FFmpegReader)):\n im_read = \"skvffmpeg\"\n else:\n im_read = 'pil' if not isinstance(detection_fnames_or_ims[0], np.ndarray) else None\n\n dataset = UnsupervisedImageDataset(detection_fnames_or_ims, image_transforms=transforms,\n landmark_list = landmarks,\n im_read=im_read)\n 
loader = DataLoader(dataset, batch_size=batch_size, num_workers=4 if im_read not in [\"skvreader\", \"skvffmpeg\"] else 1, \n shuffle=False)\n\n # import matplotlib.pyplot as plt\n\n if self.save_segmentation_one_file: \n out_segmentation_names = []\n out_segmentations = []\n out_segmentation_types = []\n\n for i, batch in enumerate(tqdm(loader)):\n # facenet_pytorch expects this stanadrization for the input to the net\n # images = fixed_image_standardization(batch['image'].to(device))\n images = batch['image'].cuda()\n # start = time.time()\n with torch.no_grad():\n segmentation = net(images)\n # end = time.time()\n\n if ref_size is None:\n ref_size = Resize((images.shape[2], images.shape[3]), interpolation=Image.NEAREST)\n\n segmentation = ref_size(segmentation)\n segmentation = segmentation.cpu().numpy()\n\n if self.save_segmentation_frame_by_frame:\n start = time.time()\n for j in range(segmentation.shape[0]):\n image_path = batch['path'][j]\n # if isinstance(out_segmentation_folder, list):\n if path_depth > 0:\n rel_path = Path(image_path).parent.relative_to(Path(image_path).parents[path_depth])\n segmentation_path = out_segmentation_folder / rel_path / (Path(image_path).stem + \".pkl\")\n else:\n segmentation_path = out_segmentation_folder / (Path(image_path).stem + \".pkl\")\n segmentation_path.parent.mkdir(exist_ok=True, parents=True)\n # im = images[j]\n # im = im.permute(1,2,0).cpu().numpy()\n # from inferno.datasets.IO import process_segmentation \n # import matplotlib.pyplot as plt\n # from inferno.datasets.FaceVideoDataModule import FaceDataModuleBase\n # seg = process_segmentation(segmentation[j], seg_type)\n # imsave(\"seg.png\", seg)\n # imsave(\"im.png\", im)\n # FaceDataModuleBase.vis_parsing_maps(im, segmentation[j], stride=1, save_im=True,\n # save_path='overlay.png')\n # plt.figure()\n # plt.imshow(im)\n # plt.show()\n # plt.figure()\n # plt.imshow(seg[0])\n # plt.show()\n save_segmentation(segmentation_path, segmentation[j], seg_type)\n print(f\" Saving batch {i} took: {end - start}\")\n end = time.time()\n if self.save_segmentation_one_file: \n segmentation_names = []\n segmentations = []\n for j in range(segmentation.shape[0]):\n image_path = batch['path'][j]\n if path_depth > 0:\n rel_path = Path(image_path).parent.relative_to(Path(image_path).parents[path_depth])\n segmentation_path = rel_path / (Path(image_path).stem + \".pkl\")\n else:\n segmentation_path = Path(image_path).stem \n segmentation_names += [segmentation_path]\n segmentations += [segmentation[j]]\n out_segmentation_names += segmentation_names\n out_segmentations += segmentations\n out_segmentation_types += [seg_type] * len(segmentation_names)\n\n if self.save_landmarks_one_file: \n if single_out_file.suffix == \".pkl\":\n save_segmentation_list(single_out_file, out_segmentations, out_segmentation_types, out_segmentation_names)\n elif single_out_file.suffix == \".hdf5\":\n save_segmentation_list_v2(single_out_file, out_segmentations, out_segmentation_types, out_segmentation_names)\n print(\"Segmentation saved to %s\" % single_out_file)\n\n\n def _get_segmentation_net(self, device, method='bisenet'):\n if method == 'bisenet':\n seg_type = 'face_parsing'\n if hasattr(self, \"_bisenet\" ): \n net = self._bisenet\n else:\n from inferno.models.external.BiSeNetFaceParsing import BiSeNetFaceParsing\n net = BiSeNetFaceParsing()\n self._bisenet = net\n batch_size = 64\n elif method == \"gpen\": \n seg_type = 'face_parsing_gpen'\n if hasattr(self, \"_gpen\" ): \n net = self._gpen\n else:\n from 
inferno.models.external.GPENFaceParsing import GPENFaceParsing\n net = GPENFaceParsing()\n self._gpen = net\n batch_size = 16\n elif method == \"focus\": \n seg_type = 'face_segmentation_focus'\n if hasattr(self, \"_focus\" ): \n net = self._focus\n else:\n from inferno.models.external.FocusSegmentation import FocusSegmentation\n net = FocusSegmentation()\n self._focus = net\n batch_size = 16\n # batch_size = 16\n else: \n raise ValueError(f\"Unknown segmentation type: {method}\" )\n\n # from inferno.utils.other import get_path_to_externals\n # path_to_segnet = get_path_to_externals() / \"face-parsing.PyTorch\"\n # if not(str(path_to_segnet) in sys.path or str(path_to_segnet.absolute()) in sys.path):\n # sys.path += [str(path_to_segnet)]\n\n # from model import BiSeNet\n # n_classes = 19\n # net = BiSeNet(n_classes=n_classes)\n # # net.cuda()\n # save_pth = path_to_segnet / 'res' / 'cp' / '79999_iter.pth'\n # net.load_state_dict(torch.load(save_pth))\n # # net.eval()\n # net.eval().to(device)\n\n # labels = {\n # 0: 'background',\n # 1: 'skin',\n # 2: 'nose',\n # 3: 'eye_g',\n # 4: 'l_eye',\n # 5: 'r_eye',\n # 6: 'l_brow',\n # 7: 'r_brow',\n # 8: 'l_ear',\n # 9: 'r_ear',\n # 10: 'mouth',\n # 11: 'u_lip',\n # 12: 'l_lip',\n # 13: 'hair',\n # 14: 'hat',\n # 15: 'ear_r',\n # 16: 'neck_l',\n # 17: 'neck',\n # 18: 'cloth'\n # }\n\n return net, seg_type , batch_size\n\n\n @staticmethod\n def save_landmark_list(fname, landmarks):\n with open(fname, \"wb\" ) as f:\n pkl.dump(landmarks, f)\n\n @staticmethod\n def load_landmark_list(fname):\n with open(fname, \"rb\" ) as f:\n landmarks = pkl.load(f)\n return landmarks\n\n\n @staticmethod\n def save_landmark_list_v2(fname, landmarks, landmark_confidences, landmark_types):\n with open(fname, \"wb\" ) as f:\n pkl.dump(landmarks, f)\n pkl.dump(landmark_confidences, f)\n pkl.dump(landmark_types, f)\n\n @staticmethod\n def load_landmark_list_v2(fname):\n with open(fname, \"rb\" ) as f:\n landmarks = pkl.load(f)\n landmark_confidences = pkl.load(f)\n landmark_types = pkl.load(f)\n return landmarks, landmark_confidences, landmark_types\n\n\n @staticmethod\n def save_detections(fname, detection_fnames, landmark_fnames, centers, sizes, last_frame_id):\n with open(fname, \"wb\" ) as f:\n pkl.dump(detection_fnames, f)\n pkl.dump(centers, f)\n pkl.dump(sizes, f)\n pkl.dump(last_frame_id, f)\n pkl.dump(landmark_fnames, f)\n\n @staticmethod\n def load_detections(fname):\n with open(fname, \"rb\" ) as f:\n detection_fnames = pkl.load(f)\n centers = pkl.load(f)\n sizes = pkl.load(f)\n try:\n last_frame_id = pkl.load(f)\n except:\n last_frame_id = -1\n try:\n landmark_fnames = pkl.load(f)\n except:\n landmark_fnames = [None]*len(detection_fnames)\n\n return detection_fnames, landmark_fnames, centers, sizes, last_frame_id" }, { "identifier": "bbox2point", "path": "inferno/datasets/ImageDatasetHelpers.py", "snippet": "def bbox2point(left, right, top, bottom, type='bbox'):\n ''' bbox from detector and landmarks are different\n '''\n if type == 'kpt68':\n old_size = (right - left + bottom - top) / 2 * 1.1\n center_x = right - (right - left) / 2.0\n center_y = bottom - (bottom - top) / 2.0\n # center = np.array([right - (right - left) / 2.0, bottom - (bottom - top) / 2.0])\n elif type == 'bbox':\n old_size = (right - left + bottom - top) / 2\n center_x = right - (right - left) / 2.0 \n center_y = bottom - (bottom - top) / 2.0 + old_size * 0.12\n # center = np.array([right - (right - left) / 2.0, bottom - (bottom - top) / 2.0 + old_size * 0.12])\n elif type == 
\"mediapipe\":\n old_size = (right - left + bottom - top) / 2 * 1.1\n center_x = right - (right - left) / 2.0 \n center_y = bottom - (bottom - top) / 2.0\n # center = np.array([right - (right - left) / 2.0, bottom - (bottom - top) / 2.0])\n else:\n raise NotImplementedError(f\" bbox2point not implemented for {type} \")\n if isinstance(center_x, np.ndarray):\n center = np.stack([center_x, center_y], axis=1)\n else: \n center = np.array([center_x, center_y])\n return old_size, center" }, { "identifier": "bbpoint_warp", "path": "inferno/datasets/ImageDatasetHelpers.py", "snippet": "def bbpoint_warp(image, center, size, target_size_height, target_size_width=None, output_shape=None, inv=True, landmarks=None, \n order=3 # order of interpolation, bicubic by default\n ):\n target_size_width = target_size_width or target_size_height\n tform = point2transform(center, size, target_size_height, target_size_width)\n tf = tform.inverse if inv else tform\n output_shape = output_shape or (target_size_height, target_size_width)\n dst_image = warp(image, tf, output_shape=output_shape, order=order)\n if landmarks is None:\n return dst_image\n # points need the matrix\n if isinstance(landmarks, np.ndarray):\n assert isinstance(landmarks, np.ndarray)\n tf_lmk = tform if inv else tform.inverse\n dst_landmarks = tf_lmk(landmarks[:, :2])\n elif isinstance(landmarks, list): \n tf_lmk = tform if inv else tform.inverse\n dst_landmarks = [] \n for i in range(len(landmarks)):\n dst_landmarks += [tf_lmk(landmarks[i][:, :2])]\n elif isinstance(landmarks, dict): \n tf_lmk = tform if inv else tform.inverse\n dst_landmarks = {}\n for key, value in landmarks.items():\n dst_landmarks[key] = tf_lmk(landmarks[key][:, :2])\n else: \n raise ValueError(\"landmarks must be np.ndarray, list or dict\")\n return dst_image, dst_landmarks" }, { "identifier": "EmotionalImageDatasetBase", "path": "inferno/datasets/EmotionalImageDataset.py", "snippet": "class EmotionalImageDatasetBase(torch.utils.data.Dataset):\n\n\n def _augment(self, img, seg_image, landmark, input_img_shape=None):\n\n if self.transforms is not None:\n assert img.dtype == np.uint8\n # img = img.astype(np.float32) # TODO: debug this (do we get valid images when not used?)\n res = self.transforms(image=img,\n segmentation_maps=seg_image,\n keypoints=landmark)\n if seg_image is not None and landmark is not None:\n img, seg_image, landmark = res\n elif seg_image is not None:\n img, seg_image = res\n elif landmark is not None:\n img, _, landmark = res\n else:\n img = res\n\n \n assert img.dtype == np.uint8\n if img.dtype != np.float32:\n img = img.astype(np.float32) / 255.0\n \n assert img.dtype == np.float32\n\n\n if seg_image is not None:\n seg_image = np.squeeze(seg_image)[..., np.newaxis].astype(np.float32)\n\n if landmark is not None:\n landmark = np.squeeze(landmark)\n if isinstance(self.landmark_normalizer, KeypointScale):\n self.landmark_normalizer.set_scale(\n img.shape[0] / input_img_shape[0],\n img.shape[1] / input_img_shape[1])\n elif isinstance(self.landmark_normalizer, KeypointNormalization):\n self.landmark_normalizer.set_scale(img.shape[0], img.shape[1])\n # self.landmark_normalizer.set_scale(input_img_shape[0], input_img_shape[1])\n else:\n raise ValueError(f\"Unsupported landmark normalizer type: {type(self.landmark_normalizer)}\")\n landmark = self.landmark_normalizer(landmark)\n\n return img, seg_image, landmark\n\n\n\n def visualize_sample(self, sample):\n if isinstance(sample, int):\n sample = self[sample]\n\n import matplotlib.pyplot as plt\n 
num_images = 1\n if 'mask' in sample.keys():\n num_images += 1\n\n if 'landmark' in sample.keys():\n num_images += 1\n if 'landmark_mediapipe' in sample.keys():\n num_images += 1\n\n if \"mica_images\" in sample.keys():\n num_images += 1\n\n if len(sample[\"image\"].shape) >= 4:\n K = sample[\"image\"].shape[0]\n fig, axs = plt.subplots(K, num_images)\n else:\n K = None\n fig, axs = plt.subplots(1, num_images)\n\n # if K is not None:\n for k in range(K or 1):\n self._plot(axs, K, k, sample)\n plt.show()\n\n def _plot(self, axs, K, k, sample):\n\n from inferno.utils.DecaUtils import tensor_vis_landmarks\n\n def index_axis(i, k):\n if K==1 or K is None:\n return axs[i]\n return axs[k,i]\n\n im = sample[\"image\"][k, ...] if K is not None else sample[\"image\"]\n im_expanded = im[np.newaxis, ...]\n\n i = 0\n f = index_axis(i, k).imshow(im.numpy().transpose([1, 2, 0]))\n index_axis(i, k).set_xlabel('Input image')\n i += 1\n\n if \"mica_images\" in sample.keys():\n mica_im = sample[\"mica_images\"][k, ...] if K is not None else sample[\"mica_images\"]\n mica_im = mica_im.numpy().transpose([1, 2, 0])\n mica_im = (mica_im + 1) / 2\n index_axis(i, k).imshow(mica_im)\n # add a caption to the axes.\n index_axis(i, k).set_xlabel(\"MICA image\")\n i += 1\n\n if 'landmark' in sample.keys():\n lmk = sample[\"landmark\"][k, ...] if K is not None else sample[\"landmark\"]\n lmk_expanded = lmk[np.newaxis, ...]\n lmk_im = tensor_vis_landmarks(im_expanded,\n self.landmark_normalizer.inv(lmk_expanded),\n isScale=False, rgb2bgr=False, scale_colors=True).numpy()[0] \\\n .transpose([1, 2, 0])\n index_axis(i, k).imshow(lmk_im)\n i += 1\n\n if 'landmark_mediapipe' in sample.keys():\n lmk = sample[\"landmark_mediapipe\"][k, ...] if K is not None else sample[\"landmark_mediapipe\"]\n lmk_expanded = lmk[np.newaxis, ...]\n lmk_im = tensor_vis_landmarks(im_expanded,\n self.landmark_normalizer.inv(lmk_expanded),\n isScale=False, rgb2bgr=False, scale_colors=True).numpy()[0] \\\n .transpose([1, 2, 0])\n index_axis(i, k).imshow(lmk_im)\n i += 1\n\n if 'mask' in sample.keys():\n mask = sample[\"mask\"][k, ...] 
if K is not None else sample[\"mask\"]\n if mask.ndim == 2:\n mask = mask[np.newaxis, ...]\n index_axis(i, k).imshow(mask.numpy().transpose([1, 2, 0]).squeeze(), cmap='gray')\n i += 1\n\n\n if 'path' in sample.keys() and 'label' in sample.keys():\n if K is None:\n print(f\"Path = {sample['path']}\")\n print(f\"Label = {sample['label']}\")\n else:\n print(f\"Path {k} = {sample['path'][k]}\")\n print(f\"Label {k} = {sample['label'][k]}\")" }, { "identifier": "UnsupervisedImageDataset", "path": "inferno/datasets/UnsupervisedImageDataset.py", "snippet": "class UnsupervisedImageDataset(torch.utils.data.Dataset):\n\n def __init__(self, image_list, landmark_list=None, image_transforms=None, im_read=None, \n align_landmarks=False):\n super().__init__()\n self.image_list = image_list\n self.landmark_list = landmark_list\n if landmark_list is not None and len(landmark_list) != len(image_list):\n raise RuntimeError(\"There must be a landmark for every image\")\n self.image_transforms = image_transforms\n self.im_read = im_read or 'skio'\n if self.im_read in ['skvreader', 'skvffmpeg']:\n self.ordered = True\n self.last_index = -1\n else: \n self.ordered = False\n if self.im_read == 'skvffmpeg':\n self.next_frame_it = self.image_list.nextFrame()\n\n if isinstance(self.image_list, np.ndarray): \n self.im_read = None\n\n def __getitem__(self, index):\n if self.ordered: \n if index != self.last_index + 1:\n raise RuntimeError(\"The images must be read in order because of the skvideo reader\")\n self.last_index = index\n # if index < len(self.image_list):\n # x = self.mnist_data[index]\n # raise IndexError(\"Out of bounds\")\n try:\n if isinstance(self.image_list, np.ndarray):\n img = self.image_list[index].transpose([2, 0, 1]).astype(np.float32)\n img_torch = torch.from_numpy(img)\n path = f\"{index:05d}\"\n elif self.im_read == 'skio':\n img = imread(self.image_list[index])\n img = img.transpose([2, 0, 1]).astype(np.float32)\n img_torch = torch.from_numpy(img)\n path = str(self.image_list[index])\n elif self.im_read == 'pil':\n img = Image.open(self.image_list[index])\n img_torch = ToTensor()(img)\n path = str(self.image_list[index])\n # path = f\"{index:05d}\"\n elif self.im_read == 'skvreader':\n img = next(self.image_list)\n img = img.transpose([2, 0, 1]).astype(np.float32)\n img_torch = torch.from_numpy(img) / 255.\n path = f\"{index:05d}\"\n elif self.im_read == 'skvffmpeg':\n img = next(self.next_frame_it)\n img = img.transpose([2, 0, 1]).astype(np.float32)\n img_torch = torch.from_numpy(img) / 255.\n path = f\"{index:05d}\"\n else:\n raise ValueError(f\"Invalid image reading method {self.im_read}\")\n except Exception as e:\n print(f\"Failed to read '{self.image_list[index]}'. File is probably corrupted. 
Rerun data processing\")\n raise e\n\n if self.image_transforms is not None:\n img_torch = self.image_transforms(img_torch)\n\n batch = {\"image\" : img_torch,\n \"path\" : path}\n\n if self.landmark_list is not None:\n landmark_type, landmark = load_landmark(self.landmark_list[index])\n landmark_torch = torch.from_numpy(landmark)\n\n if self.image_transforms is not None:\n landmark_torch = self.image_transforms(landmark_torch)\n\n batch[\"landmark\"] = landmark_torch\n\n return batch\n\n def __len__(self):\n if self.im_read in ['skvreader', 'skvffmpeg']:\n return self.image_list.getShape()[0]\n return len(self.image_list)" }, { "identifier": "save_landmark", "path": "inferno/utils/FaceDetector.py", "snippet": "def save_landmark(fname, landmark, landmark_type):\n with open(fname, \"wb\") as f:\n pkl.dump(landmark_type, f)\n pkl.dump(landmark, f)" }, { "identifier": "load_landmark", "path": "inferno/utils/FaceDetector.py", "snippet": "def load_landmark(fname):\n with open(fname, \"rb\") as f:\n landmark_type = pkl.load(f)\n landmark = pkl.load(f)\n return landmark_type, landmark" }, { "identifier": "create_image_augmenter", "path": "inferno/transforms/imgaug.py", "snippet": "def create_image_augmenter(im_size, augmentation=None) -> imgaug.augmenters.Augmenter:\n # augmenter_list = [imgaug.augmenters.Resize(im_size)]\n augmenter_list = []\n if augmentation is not None:\n augmenter_list += [augmenter_from_dict(augmentation)]\n augmenter_list += [imgaug.augmenters.Resize(im_size)]\n augmenter = imgaug.augmenters.Sequential(augmenter_list)\n return augmenter" }, { "identifier": "class_from_str", "path": "inferno/utils/other.py", "snippet": "def class_from_str(str, module=None, none_on_fail = False) -> type:\n if module is None:\n module = sys.modules[__name__]\n if hasattr(module, str):\n cl = getattr(module, str)\n return cl\n elif str.lower() == 'none' or none_on_fail:\n return None\n raise RuntimeError(f\"Class '{str}' not found.\")" } ]
import json import os, sys import numpy as np import scipy as sp import torch import pytorch_lightning as pl import pandas as pd import pickle as pkl import imgaug import traceback import json import bisect import warnings import yaml from enum import Enum from pathlib import Path from skimage.io import imread, imsave from skimage.transform import resize, rescale from inferno.datasets.IO import load_segmentation, process_segmentation, load_emotion, save_emotion from inferno.utils.image import numpy_image_to_torch from inferno.transforms.keypoints import KeypointNormalization from inferno.datasets.FaceDataModuleBase import FaceDataModuleBase from inferno.datasets.ImageDatasetHelpers import bbox2point, bbpoint_warp from inferno.datasets.EmotionalImageDataset import EmotionalImageDatasetBase from inferno.datasets.UnsupervisedImageDataset import UnsupervisedImageDataset from inferno.utils.FaceDetector import save_landmark, load_landmark from tqdm import auto from torch.utils.data.dataloader import DataLoader from inferno.transforms.imgaug import create_image_augmenter from torchvision.transforms import Resize, Compose from sklearn.neighbors import NearestNeighbors from torch.utils.data._utils.collate import default_collate from torch.utils.data.sampler import WeightedRandomSampler from collections import OrderedDict from munch import Munch from inferno.utils.other import class_from_str from omegaconf import OmegaConf, DictConfig from inferno.layers.losses.EmonetLoader import get_emonet
12,597
# for i in range(feat.shape[0]): # idx = bi*self.train_batch_size + i array[bi*self.train_batch_size:bi*self.train_batch_size + feat.shape[0], ...] = feat del array else: print(f"Feature array found in '{outfile_name}'") for bi, batch in enumerate(dl): feat = batch[feature_label].numpy() feat_size = feat.shape[1:] break array = self._get_retrieval_array(prefix, feature_label, len(dataset), feat_size, feat.dtype, modifier='r') nbrs = NearestNeighbors(n_neighbors=30, algorithm='auto', n_jobs=-1).fit(array) distances, indices = nbrs.kneighbors(array, NUM_NEIGHBORS) indices_array = np.memmap(nn_indices_file, dtype=indices.dtype, mode="w+", shape=indices.shape ) indices_array[...] = indices del indices_array distances_array = np.memmap(nn_distances_file, dtype=distances.dtype, mode="w+", shape=distances.shape ) distances_array[...] = distances del distances_array # save sizes a dtypes with open(nn_indices_file.parent / (nn_indices_file.stem + "_meta.pkl"), "wb") as f: pkl.dump(indices.dtype, f) pkl.dump(indices.shape, f) with open(nn_distances_file.parent / (nn_distances_file.stem + "_meta.pkl"), "wb") as f: pkl.dump(distances.dtype, f) pkl.dump(distances.shape, f) self.nn_indices_array = np.memmap(nn_indices_file, dtype=indices.dtype, mode="r", shape=indices.shape ) self.nn_distances_array = np.memmap(nn_distances_file, dtype=distances.dtype, mode="r", shape=distances.shape ) class AfewVaDataVisTestModule(AfewVaDataModule): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def setup(self, stage=None): self.training_set = None self.validation_set = TestSubsetAfewVa(self.image_path, self.val_list, self.image_size, self.scale, None, ring_type=None, ring_size=None, ext=self.processed_ext, bb_center_shift_x=self.bb_center_shift_x, bb_center_shift_y=self.bb_center_shift_y, ) self.test_dataframe_path = Path(self.output_dir) / "validation_representative_selection.csv" self.test_set = TestSubsetAfewVa(self.image_path, self.test_list, self.image_size, self.scale, None, ring_type=None, ring_size=None, ext=self.processed_ext, bb_center_shift_x=self.bb_center_shift_x, bb_center_shift_y=self.bb_center_shift_y, ) def val_dataloader(self): return DataLoader(self.validation_set, shuffle=False, num_workers=self.num_workers, pin_memory=True, batch_size=self.val_batch_size, drop_last=False) def test_dataloader(self): return [ self.val_dataloader(), DataLoader(self.test_set, shuffle=False, num_workers=self.num_workers, pin_memory=True, batch_size=self.test_batch_size, drop_last=False) ] class AfewVa(EmotionalImageDatasetBase): def __init__(self, image_path, sample_list, image_size, scale = 1.4, transforms : imgaug.augmenters.Augmenter = None, use_gt_bb=True, bb_center_shift_x=0.0, bb_center_shift_y=0.0, ring_type=None, ring_size=None, load_emotion_feature=False, nn_indices_array=None, nn_distances_array=None, ext=".png", use_processed = None, normalize_va = None, ): self.sample_list = sample_list self.image_path = image_path self.image_size = image_size self.use_gt_bb = use_gt_bb # self.transforms = transforms or imgaug.augmenters.Identity() self.transforms = transforms or imgaug.augmenters.Resize((image_size, image_size)) self.scale = scale
""" Author: Radek Danecek Copyright (c) 2022, Radek Danecek All rights reserved. # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is # holder of all proprietary rights on this computer program. # Using this computer program means that you agree to the terms # in the LICENSE file included with this software distribution. # Any use not explicitly granted by the LICENSE is prohibited. # # Copyright©2022 Max-Planck-Gesellschaft zur Förderung # der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute # for Intelligent Systems. All rights reserved. # # For comments or questions, please email us at [email protected] # For commercial licensing contact, please contact [email protected] """ warnings.filterwarnings('ignore') # def make_class_balanced_sampler(labels): # class_counts = np.bincount(labels) # class_weights = 1. / class_counts # weights = class_weights[labels] # return WeightedRandomSampler(weights, len(weights)) # # def make_va_balanced_sampler(labels): # class_counts = np.bincount(labels) # class_weights = 1. / class_counts # weights = class_weights[labels] # return WeightedRandomSampler(weights, len(weights)) # # def make_balanced_sample_by_weights(weights): # return WeightedRandomSampler(weights, len(weights)) def new_affewva(class_name): dataset_class = class_from_str(class_name, sys.modules[__name__]) return dataset_class class AfewVaDataModule(FaceDataModuleBase): def __init__(self, input_dir, output_dir, processed_subfolder = None, face_detector='fan', face_detector_threshold=0.9, image_size=224, scale=1.25, bb_center_shift_x=0., bb_center_shift_y=0., processed_ext=".png", device=None, augmentation=None, train_batch_size=64, val_batch_size=64, test_batch_size=64, num_workers=0, ring_type=None, ring_size=None, drop_last=False, sampler=None, split_seed=0, train_fraction=0.6, val_fraction=0.2, test_fraction=0.2, k_fold_crossvalidation=None, k_index=None, dataset_type=None, ): super().__init__(input_dir, output_dir, processed_subfolder, face_detector=face_detector, face_detector_threshold=face_detector_threshold, image_size=image_size, bb_center_shift_x=bb_center_shift_x, bb_center_shift_y=bb_center_shift_y, scale=scale, processed_ext=processed_ext, device=device) self.dataset_type = dataset_type or "AfewVa" # # self.subsets = sorted([f.name for f in (Path(input_dir) / "Manually_Annotated" / "Manually_Annotated_Images").glob("*") if f.is_dir()]) # self.input_dir = Path(self.root_dir) / "Manually_Annotated" / "Manually_Annotated_Images" # train = pd.read_csv(self.input_dir.parent / "training.csv") # val = pd.read_csv(self.input_dir.parent / "validation.csv") # self.df = pd.concat([train, val], ignore_index=True, sort=False) self.face_detector_type = 'fan' self.scale = scale self.use_processed = False if not (Path(self.output_dir) / "gt.pkl").exists(): video_list = sorted([p for p in Path(input_dir).glob("*") if p.is_dir()]) video_gts = OrderedDict() for iv, vp in enumerate(auto.tqdm(video_list)): video_gts[vp.stem] = Munch( json.load(open(vp / (vp.stem + ".json"), "r"))) with open(Path(self.output_dir) / "gt.pkl", "wb") as f: pkl.dump(video_gts, f) else: with open(Path(self.output_dir) / "gt.pkl", "rb") as f: video_gts = pkl.load(f) if self.use_processed: self.image_path = Path(self.output_dir) / "detections" else: self.image_path = Path(input_dir) self.seed = split_seed np.random.seed(self.seed) indices = np.arange(len(video_gts), dtype=np.int32) + 1 np.random.shuffle(indices) if k_fold_crossvalidation is not None: training_indices = [] 
validation_indices = [] for k in range(k_fold_crossvalidation): start_i = (k * len(indices)) // k_fold_crossvalidation end_i = ((k + 1) * len(indices)) // k_fold_crossvalidation training_indices += [np.concatenate([indices[0:(start_i)], indices[end_i:]])] validation_indices += [indices[start_i:end_i]] self.train_indices = training_indices[k_index] self.val_indices = validation_indices[k_index] self.test_indices = np.copy(validation_indices[k_index]) else: self.train_fraction = train_fraction self.val_fraction = val_fraction self.test_fraction = test_fraction assert self.train_fraction + self.val_fraction + self.test_fraction == 1.0 train_end = int(len(indices) * self.train_fraction) val_end = int(len(indices) * ( self.train_fraction + self.val_fraction)) self.train_indices = indices[:train_end] self.val_indices = indices[train_end:val_end] self.test_indices = indices[val_end:] # iterate over the training indices and create a list of the corresponding video names self.train_list = OrderedDict() self.val_list = OrderedDict() self.test_list = OrderedDict() for tr_i in self.train_indices: self.train_list[f"{tr_i:03d}"] = video_gts[f"{tr_i:03d}"] for v_i in self.val_indices: self.val_list[f"{v_i:03d}"] = video_gts[f"{v_i:03d}"] for t_i in self.test_indices: self.test_list[f"{t_i:03d}"] = video_gts[f"{t_i:03d}"] # self.ignore_invalid = ignore_invalid self.train_batch_size = train_batch_size self.val_batch_size = val_batch_size self.test_batch_size = test_batch_size self.num_workers = num_workers self.augmentation = augmentation self.sampler = sampler or "uniform" if self.sampler not in ["uniform", "balanced_videos", "balanced_expr", "balanced_va", "balanced_v", "balanced_a"]: raise ValueError(f"Invalid sampler type: '{self.sampler}'") if self.sampler in ["balanced_expr", "balanced_va", "balanced_v", "balanced_a"]: raise NotImplementedError() if ring_type not in [None, "gt_va", "augment"]: raise ValueError(f"Invalid ring type: '{ring_type}'") if ring_type == "gt_va": raise NotImplementedError() self.ring_type = ring_type self.ring_size = ring_size self.drop_last = drop_last @property def subset_size(self): return 1000 # @property # def num_subsets(self): # num_subsets = len(self.df) // self.subset_size # if len(self.df) % self.subset_size != 0: # num_subsets += 1 # return num_subsets def _detect_faces(self): subset_size = 1000 num_subsets = len(self.df) // subset_size if len(self.df) % subset_size != 0: num_subsets += 1 for sid in range(self.num_subsets): self._detect_landmarks_and_segment_subset(self.subset_size * sid, min((sid + 1) * self.subset_size, len(self.df))) def _extract_emotion_features(self): subset_size = 1000 num_subsets = len(self.df) // subset_size if len(self.df) % subset_size != 0: num_subsets += 1 for sid in range(self.num_subsets): self._extract_emotion_features_from_subset(self.subset_size * sid, min((sid + 1) * self.subset_size, len(self.df))) def _path_to_detections(self): return Path(self.output_dir) / "detections" def _path_to_segmentations(self): return Path(self.output_dir) / "segmentations" def _path_to_landmarks(self): return Path(self.output_dir) / "landmarks" def _path_to_emotions(self): return Path(self.output_dir) / "emotions" def _get_emotion_net(self, device): net = get_emonet() net = net.to(device) return net, "emo_net" def _extract_emotion_features_from_subset(self, start_i, end_i): self._path_to_emotions().mkdir(parents=True, exist_ok=True) print(f"Processing subset {start_i // self.subset_size}") image_file_list = [] for i in auto.tqdm(range(start_i, 
end_i)): im_file = self.df.loc[i]["subDirectory_filePath"] in_detection_fname = self._path_to_detections() / Path(im_file).parent / (Path(im_file).stem + ".png") if in_detection_fname.is_file(): image_file_list += [in_detection_fname] transforms = Compose([ Resize((256, 256)), ]) batch_size = 32 dataset = UnsupervisedImageDataset(image_file_list, image_transforms=transforms, im_read='pil') loader = DataLoader(dataset, batch_size=batch_size, num_workers=4, shuffle=False) device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') print(device) net, emotion_type = self._get_emotion_net(device) for i, batch in enumerate(auto.tqdm(loader)): # facenet_pytorch expects this stanadrization for the input to the net # images = fixed_image_standardization(batch['image'].to(device)) images = batch['image'].cuda() # start = time.time() with torch.no_grad(): out = net(images, intermediate_features=True) # end = time.time() # print(f" Inference batch {i} took : {end - start}") emotion_features = {key : val.detach().cpu().numpy() for key, val in out.items()} # start = time.time() for j in range(images.size()[0]): image_path = batch['path'][j] out_emotion_folder = self._path_to_emotions() / Path(image_path).parent.name out_emotion_folder.mkdir(exist_ok=True, parents=True) emotion_path = out_emotion_folder / (Path(image_path).stem + ".pkl") emotion_feature_j = {key: val[j] for key, val in emotion_features.items()} del emotion_feature_j['emo_feat'] # too large to be stored per frame = (768, 64, 64) del emotion_feature_j['heatmap'] # not too large but probably not usefull = (68, 64, 64) # we are keeping emo_feat_2 (output of last conv layer (before FC) and then the outputs of the FCs - expression, valence and arousal) save_emotion(emotion_path, emotion_feature_j, emotion_type) def _detect_landmarks_and_segment_subset(self, start_i, end_i): self._path_to_detections().mkdir(parents=True, exist_ok=True) self._path_to_segmentations().mkdir(parents=True, exist_ok=True) self._path_to_landmarks().mkdir(parents=True, exist_ok=True) detection_fnames = [] out_segmentation_folders = [] status_array = np.memmap(self.status_array_path, dtype=np.bool, mode='r', shape=(self.num_subsets,) ) completed = status_array[start_i // self.subset_size] if not completed: print(f"Processing subset {start_i // self.subset_size}") for i in auto.tqdm(range(start_i, end_i)): im_file = self.df.loc[i]["subDirectory_filePath"] left = self.df.loc[i]["face_x"] top = self.df.loc[i]["face_y"] right = left + self.df.loc[i]["face_width"] bottom = top + self.df.loc[i]["face_height"] bb = np.array([top, left, bottom, right]) im_fullfile = Path(self.input_dir) / im_file try: detection, _, _, bbox_type, landmarks, orig_landmarks = self._detect_faces_in_image(im_fullfile, detected_faces=[bb]) except Exception as e: # except ValueError as e: print(f"Failed to load file:") print(f"{im_fullfile}") print(traceback.print_exc()) continue # except SyntaxError as e: # print(f"Failed to load file:") # print(f"{im_fullfile}") # print(traceback.print_exc()) # continue out_detection_fname = self._path_to_detections() / Path(im_file).parent / (Path(im_file).stem + self.processed_ext) # detection_fnames += [out_detection_fname.relative_to(self.output_dir)] out_detection_fname.parent.mkdir(exist_ok=True) detection_fnames += [out_detection_fname] if self.processed_ext in [".jpg", ".JPG"]: imsave(out_detection_fname, detection[0], quality=100) else: imsave(out_detection_fname, detection[0]) # out_segmentation_folders += [self._path_to_segmentations() / 
Path(im_file).parent] # save landmarks out_landmark_fname = self._path_to_landmarks() / Path(im_file).parent / (Path(im_file).stem + ".pkl") out_landmark_fname.parent.mkdir(exist_ok=True) # landmark_fnames += [out_landmark_fname.relative_to(self.output_dir)] save_landmark(out_landmark_fname, landmarks[0], bbox_type) self._segment_images(detection_fnames, self._path_to_segmentations(), path_depth=1) status_array = np.memmap(self.status_array_path, dtype=np.bool, mode='r+', shape=(self.num_subsets,) ) status_array[start_i // self.subset_size] = True status_array.flush() del status_array print(f"Processing subset {start_i // self.subset_size} finished") else: print(f"Subset {start_i // self.subset_size} is already processed") @property def status_array_path(self): return Path(self.output_dir) / "status.memmap" @property def is_processed(self): status_array = np.memmap(self.status_array_path, dtype=np.bool, mode='r', shape=(self.num_subsets,) ) all_processed = status_array.all() return all_processed def prepare_data(self): pass # if self.use_processed: # if not self.status_array_path.is_file(): # print(f"Status file does not exist. Creating '{self.status_array_path}'") # self.status_array_path.parent.mkdir(exist_ok=True, parents=True) # status_array = np.memmap(self.status_array_path, # dtype=np.bool, # mode='w+', # shape=(self.num_subsets,) # ) # status_array[...] = False # del status_array # # all_processed = self.is_processed # if not all_processed: # self._detect_faces() # # # if self.ring_type == "emonet_feature": # self._prepare_emotion_retrieval() def _new_training_set(self, for_training=True): if for_training: im_transforms_train = create_image_augmenter(self.image_size, self.augmentation) if self.ring_type == "emonet_feature": prefix = self.mode + "_train_" if self.ignore_invalid: prefix += "valid_only_" feature_label = 'emo_net_emo_feat_2' self._load_retrieval_arrays(prefix, feature_label) nn_indices = self.nn_indices_array nn_distances = self.nn_distances_array else: nn_indices = None nn_distances = None return new_affewva(self.dataset_type)(self.image_path, self.train_list, self.image_size, self.scale, im_transforms_train, ring_type=self.ring_type, ring_size=self.ring_size, load_emotion_feature=False, nn_indices_array=nn_indices, nn_distances_array= nn_distances, ext=self.processed_ext, bb_center_shift_x=self.bb_center_shift_x, bb_center_shift_y=self.bb_center_shift_y, ) return new_affewva(self.dataset_type)(self.image_path, self.train_list, self.image_size, self.scale, None, ring_type=None, ring_size=None, load_emotion_feature=True, ext=self.processed_ext, bb_center_shift_x=self.bb_center_shift_x, bb_center_shift_y=self.bb_center_shift_y, ) def setup(self, stage=None): self.training_set = self._new_training_set() self.validation_set = new_affewva(self.dataset_type)(self.image_path, self.val_list, self.image_size, self.scale, None, ring_type=None, ring_size=None, ext=self.processed_ext, bb_center_shift_x=self.bb_center_shift_x, bb_center_shift_y=self.bb_center_shift_y, ) self.test_dataframe_path = Path(self.output_dir) / "validation_representative_selection.csv" self.test_set = new_affewva(self.dataset_type)(self.image_path, self.test_list, self.image_size, self.scale, None, ring_type=None, ring_size=None, ext=self.processed_ext, bb_center_shift_x=self.bb_center_shift_x, bb_center_shift_y=self.bb_center_shift_y, ) # if self.mode in ['all', 'manual']: # # self.image_list += sorted(list((Path(self.path) / "Manually_Annotated").rglob(".jpg"))) # self.dataframe = pd.load_csv(self.path 
/ "Manually_Annotated" / "Manually_Annotated.csv") # if self.mode in ['all', 'automatic']: # # self.image_list += sorted(list((Path(self.path) / "Automatically_Annotated").rglob("*.jpg"))) # self.dataframe = pd.load_csv( # self.path / "Automatically_Annotated" / "Automatically_annotated_file_list.csv") def train_dataloader(self): if self.sampler == "uniform": sampler = None else: raise NotImplementedError() # elif self.sampler == "balanced_expr": # sampler = make_class_balanced_sampler(self.training_set.df["expression"].to_numpy()) # elif self.sampler == "balanced_va": # sampler = make_balanced_sample_by_weights(self.training_set.va_sample_weights) # elif self.sampler == "balanced_v": # sampler = make_balanced_sample_by_weights(self.training_set.v_sample_weights) # elif self.sampler == "balanced_a": # sampler = make_balanced_sample_by_weights(self.training_set.a_sample_weights) # else: # raise ValueError(f"Invalid sampler value: '{self.sampler}'") dl = DataLoader(self.training_set, shuffle=sampler is None, num_workers=self.num_workers, pin_memory=True, batch_size=self.train_batch_size, drop_last=self.drop_last, sampler=sampler) return dl def val_dataloader(self): return DataLoader(self.validation_set, shuffle=True, num_workers=self.num_workers, pin_memory=True, batch_size=self.val_batch_size, drop_last=False) def test_dataloader(self): return [ self.val_dataloader(), DataLoader(self.test_set, shuffle=True, num_workers=self.num_workers, pin_memory=True, batch_size=self.test_batch_size, drop_last=False) ] def _get_retrieval_array(self, prefix, feature_label, dataset_size, feature_shape, feature_dtype, modifier='w+'): outfile_name = self._path_to_emotion_nn_retrieval_file(prefix, feature_label) if outfile_name.is_file() and modifier != 'r': raise RuntimeError(f"The retrieval array already exists! 
'{outfile_name}'") shape = tuple([dataset_size] + list(feature_shape)) outfile_name.parent.mkdir(exist_ok=True, parents=True) array = np.memmap(outfile_name, dtype=feature_dtype, mode=modifier, shape=shape ) return array def _path_to_emotion_nn_indices_file(self, prefix, feature_label): nn_indices_file = Path(self.output_dir) / "cache" / (prefix + feature_label + "_nn_indices.memmap") return nn_indices_file def _path_to_emotion_nn_distances_file(self, prefix, feature_label): nn_distances_file = Path(self.output_dir) / "cache" / (prefix + feature_label + "_nn_distances.memmap") return nn_distances_file def _path_to_emotion_nn_retrieval_file(self, prefix, feature_label): outfile_name = Path(self.output_dir) / "cache" / (prefix + feature_label + ".memmap") return outfile_name def _load_retrieval_arrays(self, prefix, feature_label): # prefix = self.mode + "_train_" # if self.ignore_invalid: # prefix += "valid_only_" # feature_label = 'emo_net_emo_feat_2' nn_indices_file = self._path_to_emotion_nn_indices_file(prefix, feature_label) nn_distances_file = self._path_to_emotion_nn_distances_file(prefix, feature_label) try: with open(nn_indices_file.parent / (nn_indices_file.stem + "_meta.pkl"), "rb") as f: indices_array_dtype = pkl.load(f) indices_array_shape = pkl.load(f) except: indices_array_dtype = np.int64, indices_array_shape = (len(dataset), NUM_NEIGHBORS) try: with open(nn_distances_file.parent / (nn_distances_file.stem + "_meta.pkl"), "rb") as f: distances_array_dtype = pkl.load(f) distances_array_shape = pkl.load(f) except: distances_array_dtype = np.float32, distances_array_shape = (len(dataset), NUM_NEIGHBORS) self.nn_indices_array = np.memmap(nn_indices_file, # dtype=np.int32, dtype=indices_array_dtype, mode="r", shape=indices_array_shape ) self.nn_distances_array = np.memmap(nn_distances_file, dtype=distances_array_dtype, # dtype=np.float64, mode="r", shape=distances_array_shape ) def _prepare_emotion_retrieval(self): prefix = self.mode + "_train_" if self.ignore_invalid: prefix += "valid_only_" feature_label = 'emo_net_emo_feat_2' nn_indices_file = self._path_to_emotion_nn_indices_file(prefix, feature_label) nn_distances_file = self._path_to_emotion_nn_distances_file(prefix, feature_label) NUM_NEIGHBORS = 100 if nn_indices_file.is_file() and nn_distances_file.is_file(): print("Precomputed nn arrays found.") return dataset = self._new_training_set(for_training=False) dl = DataLoader(dataset, shuffle=False, num_workers=self.num_workers, batch_size=self.train_batch_size) array = None if self.ring_type != "emonet_feature": raise ValueError(f"Invalid ring type for emotion retrieval {self.ring_type}") outfile_name = self._path_to_emotion_nn_retrieval_file(prefix, feature_label) if not outfile_name.is_file(): for bi, batch in enumerate(auto.tqdm(dl)): feat = batch[feature_label].numpy() feat_size = feat.shape[1:] if array is None: array = self._get_retrieval_array(prefix, feature_label, len(dataset), feat_size, feat.dtype) # for i in range(feat.shape[0]): # idx = bi*self.train_batch_size + i array[bi*self.train_batch_size:bi*self.train_batch_size + feat.shape[0], ...] 
= feat del array else: print(f"Feature array found in '{outfile_name}'") for bi, batch in enumerate(dl): feat = batch[feature_label].numpy() feat_size = feat.shape[1:] break array = self._get_retrieval_array(prefix, feature_label, len(dataset), feat_size, feat.dtype, modifier='r') nbrs = NearestNeighbors(n_neighbors=30, algorithm='auto', n_jobs=-1).fit(array) distances, indices = nbrs.kneighbors(array, NUM_NEIGHBORS) indices_array = np.memmap(nn_indices_file, dtype=indices.dtype, mode="w+", shape=indices.shape ) indices_array[...] = indices del indices_array distances_array = np.memmap(nn_distances_file, dtype=distances.dtype, mode="w+", shape=distances.shape ) distances_array[...] = distances del distances_array # save sizes a dtypes with open(nn_indices_file.parent / (nn_indices_file.stem + "_meta.pkl"), "wb") as f: pkl.dump(indices.dtype, f) pkl.dump(indices.shape, f) with open(nn_distances_file.parent / (nn_distances_file.stem + "_meta.pkl"), "wb") as f: pkl.dump(distances.dtype, f) pkl.dump(distances.shape, f) self.nn_indices_array = np.memmap(nn_indices_file, dtype=indices.dtype, mode="r", shape=indices.shape ) self.nn_distances_array = np.memmap(nn_distances_file, dtype=distances.dtype, mode="r", shape=distances.shape ) class AfewVaDataVisTestModule(AfewVaDataModule): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def setup(self, stage=None): self.training_set = None self.validation_set = TestSubsetAfewVa(self.image_path, self.val_list, self.image_size, self.scale, None, ring_type=None, ring_size=None, ext=self.processed_ext, bb_center_shift_x=self.bb_center_shift_x, bb_center_shift_y=self.bb_center_shift_y, ) self.test_dataframe_path = Path(self.output_dir) / "validation_representative_selection.csv" self.test_set = TestSubsetAfewVa(self.image_path, self.test_list, self.image_size, self.scale, None, ring_type=None, ring_size=None, ext=self.processed_ext, bb_center_shift_x=self.bb_center_shift_x, bb_center_shift_y=self.bb_center_shift_y, ) def val_dataloader(self): return DataLoader(self.validation_set, shuffle=False, num_workers=self.num_workers, pin_memory=True, batch_size=self.val_batch_size, drop_last=False) def test_dataloader(self): return [ self.val_dataloader(), DataLoader(self.test_set, shuffle=False, num_workers=self.num_workers, pin_memory=True, batch_size=self.test_batch_size, drop_last=False) ] class AfewVa(EmotionalImageDatasetBase): def __init__(self, image_path, sample_list, image_size, scale = 1.4, transforms : imgaug.augmenters.Augmenter = None, use_gt_bb=True, bb_center_shift_x=0.0, bb_center_shift_y=0.0, ring_type=None, ring_size=None, load_emotion_feature=False, nn_indices_array=None, nn_distances_array=None, ext=".png", use_processed = None, normalize_va = None, ): self.sample_list = sample_list self.image_path = image_path self.image_size = image_size self.use_gt_bb = use_gt_bb # self.transforms = transforms or imgaug.augmenters.Identity() self.transforms = transforms or imgaug.augmenters.Resize((image_size, image_size)) self.scale = scale
self.landmark_normalizer = KeypointNormalization()
5
2023-11-07 20:13:32+00:00
16k
hxz393/ConfigCenterComparer
ui/action_compare.py
[ { "identifier": "get_resource_path", "path": "lib/get_resource_path.py", "snippet": "def get_resource_path(relative_path: Union[str, os.PathLike]) -> Optional[str]:\n \"\"\"\n 获取资源的绝对路径。这个函数适用于 PyInstaller 打包后的可执行文件。\n\n :type relative_path: Union[str, os.PathLike]\n :param relative_path: 相对路径,可以是字符串或 os.PathLike 对象。\n :rtype: Optional[str]\n :return: 资源的绝对路径,如果发生错误则返回 None。\n \"\"\"\n\n try:\n base_path = getattr(sys, '_MEIPASS', os.path.abspath(\".\"))\n return os.path.join(base_path, os.path.normpath(relative_path))\n except Exception:\n logger.exception(\"An error occurred while retrieving resource path\")\n return None" }, { "identifier": "log_time", "path": "lib/log_time.py", "snippet": "def log_time(func: Callable) -> Callable:\n \"\"\"\n 一个装饰器,用于记录被装饰函数的运行时间。\n\n 此装饰器在函数执行前后记录时间,计算并记录函数的运行时间。如果函数执行期间出现异常,将记录异常并返回 None。\n\n :param func: 被装饰的函数。\n :type func: Callable\n :return: 包装后的函数。\n :rtype: Callable\n\n :example:\n >>> @log_time\n ... def test_function():\n ... time.sleep(1)\n ...\n >>> test_function() # 这将记录 test_function 的运行时间\n \"\"\"\n\n @wraps(func)\n def wrapper(*args, **kwargs) -> Any:\n \"\"\"\n 包装函数,用于实际执行被装饰的函数并计算其运行时间。\n\n 此函数首先记录开始时间,然后尝试执行原始函数,最后记录结束时间并计算运行时长。如果在执行过程中出现异常,会记录异常信息。\n\n :param args: 原始函数的位置参数。\n :param kwargs: 原始函数的关键字参数。\n :return: 原始函数的返回值,如果出现异常则返回 None。\n :rtype: Any\n \"\"\"\n start_time = time.time()\n try:\n result = func(*args, **kwargs)\n except Exception as e:\n logger.exception(f\"Exception occurred in {func.__name__}: {e}\")\n return None\n else:\n end_time = time.time()\n logger.debug(f\"{func.__name__} executed in {end_time - start_time:.2f} seconds.\")\n return result\n\n return wrapper" }, { "identifier": "ConfigManager", "path": "ui/config_manager.py", "snippet": "class ConfigManager(QObject):\n \"\"\"\n 配置管理器类,负责管理和更新应用程序的配置信息。\n\n 该类包括获取和设置主配置、连接配置和跳过列表的方法,同时提供信号以通知配置更新。\n\n :ivar config_main_updated: 当主配置更新时发出的信号。\n :ivar config_connection_updated: 当连接配置更新时发出的信号。\n :ivar skip_list_updated: 当跳过列表更新时发出的信号。\n \"\"\"\n config_main_updated = pyqtSignal()\n config_connection_updated = pyqtSignal()\n skip_list_updated = pyqtSignal()\n\n def __init__(self):\n super().__init__()\n self._config_main, self._config_apollo, self._config_nacos = read_config_all()\n self._skip_list = read_file_to_list(CONFIG_SKIP_PATH) or []\n\n def get_config_main(self) -> Optional[Dict[str, str]]:\n \"\"\"\n 获取主配置的副本。\n\n :return: 包含主配置的字典,如果出现错误则返回 None。\n :rtype: Optional[Dict[str, str]]\n \"\"\"\n try:\n return copy.deepcopy(self._config_main)\n except Exception:\n logger.exception(\"Failed to get config_main.\")\n return None\n\n def get_config_connection(self) -> Optional[Dict[str, Dict[str, Union[Dict[str, str], bool]]]]:\n \"\"\"\n 根据当前配置中心获取连接配置的副本。\n\n :return: 包含连接配置的字典,如果出现错误则返回 None。\n :rtype: Optional[Dict[str, Dict[str, Union[Dict[str, str], bool]]]]\n \"\"\"\n try:\n if self._config_main['config_center'] == 'Apollo':\n return copy.deepcopy(self._config_apollo)\n else:\n return copy.deepcopy(self._config_nacos)\n except Exception:\n logger.exception(\"Failed to get config_connection.\")\n return None\n\n def get_skip_list(self) -> Optional[List[str]]:\n \"\"\"\n 获取忽略列表的副本。\n\n :return: 包含跳过项的列表,如果出现错误则返回 None。\n :rtype: Optional[List[str]]\n \"\"\"\n try:\n return copy.deepcopy(self._skip_list)\n except Exception:\n logger.exception(\"Failed to get skip_list.\")\n return None\n\n def update_config_main(self, new_config: Dict[str, str]) -> None:\n \"\"\"\n 更新主配置。\n\n :param new_config: 新的主配置。\n :type new_config: Dict[str, str]\n \"\"\"\n try:\n 
self._config_main = new_config\n self.config_main_updated.emit()\n write_dict_to_json(CONFIG_MAIN_PATH, new_config)\n logger.info(\"Config updated: config_main\")\n except Exception:\n logger.exception(\"Failed to update config: config_main\")\n\n def update_config_connection(self, new_config: Dict[str, Dict[str, Union[Dict[str, str], bool]]]) -> None:\n \"\"\"\n 更新连接配置。\n\n :param new_config: 新的连接配置。\n :type new_config: Dict[str, Dict[str, Union[Dict[str, str], bool]]]\n \"\"\"\n try:\n if self._config_main['config_center'] == 'Apollo':\n self._config_apollo = new_config\n write_dict_to_json(CONFIG_APOLLO_PATH, new_config)\n else:\n self._config_nacos = new_config\n write_dict_to_json(CONFIG_NACOS_PATH, new_config)\n self.config_connection_updated.emit()\n logger.info(\"Config updated: config_connection\")\n except Exception:\n logger.exception(\"Failed to update config: config_connection\")\n\n def update_skip_list(self, new_config: List[str]) -> None:\n \"\"\"\n 更新忽略列表。\n\n :param new_config: 新忽略列表。\n :type new_config: List[str]\n \"\"\"\n try:\n self._skip_list = new_config\n # 写入到配置文件\n self.skip_list_updated.emit()\n write_list_to_file(CONFIG_SKIP_PATH, new_config)\n logger.info(\"Config updated: skip_list\")\n except Exception:\n logger.exception(\"Failed to update config: skip_list\")" }, { "identifier": "DialogComparison", "path": "ui/dialog_comparison.py", "snippet": "class DialogComparison(QDialog):\n \"\"\"\n 对话框类,用于展示不同环境下配置的自我比较结果。\n\n :param lang_manager: 语言管理器实例,用于处理语言相关设置。\n :type lang_manager: LangManager\n :param config_manager: 配置管理器实例,用于管理配置。\n :type config_manager: ConfigManager\n :param data: 包含环境配置比较结果的字典。\n :type data: Dict[str, Dict[str, List[Dict[str, str]]]]\n \"\"\"\n status_updated = pyqtSignal(str)\n\n def __init__(self,\n lang_manager: LangManager,\n config_manager: ConfigManager,\n data: Dict[str, Dict[str, List[Dict[str, str]]]]):\n super().__init__(flags=Qt.Dialog | Qt.WindowCloseButtonHint)\n self.lang_manager = lang_manager\n self.lang_manager.lang_updated.connect(self.update_lang)\n self.config_manager = config_manager\n self.lang = self.lang_manager.get_lang()\n self.data = data\n\n self.initUI()\n\n def initUI(self) -> None:\n \"\"\"\n 初始化用户界面组件。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n try:\n # 运行语言配置和设置窗口\n self.setWindowIcon(QIcon(get_resource_path('media/icons8-diff-files-26.png')))\n self.setMinimumSize(1000, 480)\n self.setStyleSheet(\"font-size: 14px;\")\n # 设置主布局\n self.layout = QVBoxLayout(self)\n self.layout.setContentsMargins(0, 0, 0, 0)\n # 创建过滤栏\n filter_bar = self._create_filter_bar()\n self.layout.addWidget(filter_bar)\n # 加入横向分割线\n separator = QFrame()\n separator.setFrameShape(QFrame.HLine)\n separator.setFrameShadow(QFrame.Sunken)\n self.layout.addWidget(separator)\n # 运行语言配置,创建表格要用到\n self.update_lang()\n # 创建标签页\n tab_widget = QTabWidget()\n self.layout.addWidget(tab_widget)\n for env in self.env_keys:\n tab_widget.addTab(self._create_tab(env), env)\n except Exception:\n logger.exception(\"Failed to initialize DialogComparison UI components\")\n self.status_updated.emit(self.lang['label_status_error'])\n\n def _create_filter_bar(self) -> QWidget:\n \"\"\"\n 创建过滤栏组件。包含公共配置标记和搜索功能。\n\n :rtype: QWidget\n :return: 返回过滤栏组件。\n \"\"\"\n # 建立横向过滤器布局\n filter_bar = QWidget()\n layout = QHBoxLayout(filter_bar)\n layout.setContentsMargins(10, 10, 10, 0)\n filter_bar.setLayout(layout)\n\n # 建立标签,加入布局\n self.public_label = QLabel()\n layout.addWidget(self.public_label)\n # 设置输入框\n self.public_box = QLineEdit()\n 
self.public_box.returnPressed.connect(self.set_public)\n self.public_box.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)\n self.public_box.setMinimumWidth(100)\n self.public_box.setMaximumWidth(200)\n layout.addWidget(self.public_box)\n # 设置按钮\n self.public_button = QPushButton()\n self.public_button.clicked.connect(self.set_public)\n layout.addWidget(self.public_button)\n\n # 加入分割线\n separator = QFrame()\n separator.setFrameShape(QFrame.VLine)\n separator.setFrameShadow(QFrame.Raised)\n layout.addWidget(separator)\n\n # 建立标签,加入布局\n self.search_label = QLabel()\n layout.addWidget(self.search_label)\n # 设置输入框\n self.search_box = QLineEdit()\n self.search_box.returnPressed.connect(self.search_value)\n self.search_box.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)\n self.search_box.setMinimumWidth(100)\n layout.addWidget(self.search_box)\n # 设置按钮\n self.search_button = QPushButton()\n self.search_button.clicked.connect(self.search_value)\n layout.addWidget(self.search_button)\n\n return filter_bar\n\n def _create_tab(self, env: str) -> QWidget:\n \"\"\"\n 创建一个标签页。\n\n :param env: 环境名。\n :type env: str\n\n :rtype: QWidget\n :return: 返回标签页。\n \"\"\"\n tab = QWidget()\n tab_layout = QVBoxLayout(tab)\n table = self._create_table(self.data.get(env, {}))\n tab_layout.addWidget(table)\n\n # 为每个 table 实例化 ActionCopy 和 ActionSave\n table.actionCopy = ActionCopy(self.lang_manager, table)\n table.actionCopy.status_updated.connect(self.forward_status)\n table.actionSave = ActionSave(self.lang_manager, table)\n table.actionSave.status_updated.connect(self.forward_status)\n # 为每个 table 创建右键菜单\n table.setContextMenuPolicy(Qt.CustomContextMenu)\n table.customContextMenuRequested.connect(self._cell_context_menu)\n\n return tab\n\n def _cell_context_menu(self, pos: QPoint) -> None:\n \"\"\"\n 实现表格单元格的右键菜单功能。\n\n :param pos: 右键点击的位置。\n :type pos: QPoint\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n sender = self.sender()\n # 确定sender是QTableWidget,且拥有actionCopy和actionSave属性\n if isinstance(sender, QTableWidget):\n if hasattr(sender, 'actionCopy') and hasattr(sender, 'actionSave'):\n copy = getattr(sender, 'actionCopy')\n save = getattr(sender, 'actionSave')\n menu = QMenu(sender)\n menu.addAction(copy.action_copy)\n menu.addAction(save.action_save)\n menu.exec_(sender.viewport().mapToGlobal(pos))\n\n def update_lang(self) -> None:\n \"\"\"\n 更新界面语言设置。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n self.lang = self.lang_manager.get_lang()\n self.setWindowTitle(self.lang['ui.dialog_comparison_1'])\n # 更新标签页\n self.env_keys = [\n self.lang['ui.dialog_settings_connection_2'],\n self.lang['ui.dialog_settings_connection_3'],\n self.lang['ui.dialog_settings_connection_4'],\n self.lang['ui.dialog_settings_connection_5']\n ]\n self._update_tab_titles(self.env_keys)\n # 更新表头\n self.column_headers = [\n self.lang['ui.table_main_1'],\n self.lang['ui.table_main_2'],\n self.lang['ui.table_main_3'],\n self.lang['ui.action_compare_3'],\n ]\n self._update_all_table_headers(self.column_headers)\n # 更新其他文字\n self.public_label.setText(self.lang['ui.dialog_comparison_2'])\n self.public_button.setText(self.lang['ui.dialog_comparison_3'])\n self.public_box.setToolTip(self.lang['ui.dialog_comparison_5'])\n self.search_label.setText(self.lang['ui.dialog_comparison_4'])\n self.search_button.setText(self.lang['ui.filter_bar_9'])\n self.search_box.setToolTip(self.lang['ui.dialog_comparison_6'])\n\n def _update_tab_titles(self, new_titles: List[str]) -> None:\n \"\"\"\n 更新标签页标题。\n\n :param new_titles: 包含新标题的列表。\n :type 
new_titles: List[str]\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n tab_widget = self.findChild(QTabWidget)\n # 确定标签页存在,且标签数量与新标题数量相等\n if tab_widget is not None and len(new_titles) == tab_widget.count():\n for index, title in enumerate(new_titles):\n tab_widget.setTabText(index, title)\n\n def _update_all_table_headers(self, new_headers: List[str]) -> None:\n \"\"\"\n 更新所有标签页中表格的表头。\n\n :param new_headers: 包含新表头的列表。\n :type new_headers: List[str]\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n tab_widget = self.findChild(QTabWidget)\n if tab_widget is None:\n return\n # 循环设置每个标签页中的表格表头\n for i in range(tab_widget.count()):\n table = tab_widget.widget(i).findChild(QTableWidget)\n for j, header in enumerate(new_headers):\n table.setHorizontalHeaderItem(j, QTableWidgetItem(header))\n\n def _create_table(self, items: Dict[str, List[Dict[str, str]]]) -> QTableWidget:\n \"\"\"\n 建立表格并插入数据。\n\n :param items: 包含单个环境配置比较结果。\n :type items: List[Dict[str, str]]]\n\n :rtype: QTableWidget\n :return: 返回建好的表格组件。\n \"\"\"\n table = QTableWidget()\n # 配置表格基本属性\n table.setColumnCount(len(self.column_headers))\n table.setHorizontalHeaderLabels(self.column_headers)\n table.setEditTriggers(QTableWidget.NoEditTriggers)\n table.setSelectionBehavior(QTableWidget.SelectItems)\n table.setTextElideMode(Qt.ElideNone)\n table.horizontalHeader().setMinimumSectionSize(220)\n\n # 向表格插入数据。先计算总行数,禁用更新,优化性能。\n table.setUpdatesEnabled(False)\n table.setRowCount(sum(len(group) for group in items.values()))\n self._insert_data_to_table(table, items)\n table.setUpdatesEnabled(True)\n\n return table\n\n def _insert_data_to_table(self,\n table: QTableWidget,\n items: Dict[str, List[Dict[str, str]]]) -> None:\n \"\"\"\n 向表格插入特定格式的数据。\n\n :param table: 展示结果表格。\n :type table: QTableWidget\n :param items: 包含单个环境配置比较结果。\n :type items: Dict[str, List[Dict[str, str]]]\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n try:\n # 如果没数据,直接返回\n if not items:\n return\n\n # 两种颜色:白色和灰色\n color_palette = [Qt.white, QColor(COLOR_SKIP)]\n # 开始行数\n row_count = 0\n\n # 索引键不需要,直接获取对结果分好组的列表\n for group_number, item_group in enumerate(items.values(), start=1):\n # 要单元格设置的背景颜色\n group_color = color_palette[group_number % len(color_palette)]\n # 对包含多组配置字典的列表进行处理\n for item_index, item in enumerate(item_group, start=1):\n # 为每行设置组号\n table.setVerticalHeaderItem(row_count, QTableWidgetItem(f\"{group_number}.{item_index}\"))\n # 对表头处理,col_index为列号,key为列标题\n for col_index, key in enumerate(self.column_headers):\n # 避免 KeyError\n value = item.get(key, \"\")\n # 设置单元格数据\n table_item = QTableWidgetItem(str(value))\n # 为单元格设置背景颜色\n table_item.setBackground(group_color)\n # 通过行号、列号和数据信息插入到表格\n table.setItem(row_count, col_index, table_item)\n # 插入完一行后,行号加一\n row_count += 1\n except Exception:\n logger.exception(\"Failed to insert data into table\")\n self.status_updated.emit(self.lang['label_status_error'])\n\n def forward_status(self, message: str) -> None:\n \"\"\"\n 用于转发状态信号。\n\n :param message: 要转发的消息。\n :type message: str\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n self.status_updated.emit(message)\n\n def _get_current_table(self) -> Optional[QTableWidget]:\n \"\"\"\n 获取当前选中标签页中的表格。\n\n :rtype: Optional[QTableWidget]\n :return: 返回当前选中标签页中的 QTableWidget 实例。没获取到则返回 None。\n \"\"\"\n tab_widget = self.findChild(QTabWidget)\n if tab_widget is None:\n return None\n\n current_tab = tab_widget.currentWidget()\n if current_tab is None:\n return None\n\n table = current_tab.findChild(QTableWidget)\n return table\n\n def set_public(self) -> None:\n \"\"\"\n 
根据用户输入公共配置的名称,设置表格中对应行的字体颜色。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n # 如果关闭颜色设置,直接返回。\n color_switch = self.config_manager.get_config_main().get('color_set', 'ON')\n if color_switch == 'OFF':\n return\n\n # 获取输入值和表格。表格为空则返回。\n public_value = self.public_box.text().strip()\n table = self._get_current_table()\n if table is None:\n return\n\n # 无论输入值是否为空,都先重置表格字体颜色\n self._reset_table_font_color(table)\n\n # 输入值为空,直接返回。\n if not public_value:\n return\n\n # 遍历表格设置匹配行的字体颜色\n for row in range(table.rowCount()):\n cell_item = table.item(row, 0)\n if cell_item and public_value == cell_item.text():\n self._set_row_font_color(table, row, Qt.red)\n\n @staticmethod\n def _set_row_font_color(table: QTableWidget,\n row: int,\n color: str) -> None:\n \"\"\"\n 设置特定行的字体颜色。\n\n :param table: 要操作的表格对象。\n :type table: QTableWidget\n :param row: 行号。\n :type row: int\n :param color: 字体颜色。\n :type color: str\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n for column in range(table.columnCount()):\n cell_item = table.item(row, column)\n if cell_item:\n cell_item.setForeground(QColor(color))\n\n @staticmethod\n def _reset_table_font_color(table: QTableWidget) -> None:\n \"\"\"\n 重置表格所有单元格的字体颜色为黑色。\n\n :param table: 要操作的表格对象。\n :type table: QTableWidget\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n for row in range(table.rowCount()):\n for column in range(table.columnCount()):\n cell_item = table.item(row, column)\n if cell_item:\n cell_item.setForeground(Qt.black)\n\n def search_value(self) -> None:\n \"\"\"\n 根据用户输入的搜索字段,去表格中所有配置键和配置值中去搜索匹配。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n # 获取用户输入的搜索文本和表格。\n search_text = self.search_box.text().strip().lower()\n table = self._get_current_table()\n\n # 如果没有找到表格,直接返回\n if table is None:\n return\n\n # 如果输入框为空,重置所有行为可见\n if not search_text:\n self._reset_row_hidden_status(table)\n return\n\n # 逐行匹配搜索值\n for row in range(table.rowCount()):\n self._search_process(table, row, search_text)\n\n @staticmethod\n def _search_process(table: QTableWidget,\n row: int,\n search_text: str) -> None:\n \"\"\"\n 作用于单行,根据搜索文本设置可见性。\n\n :param table: 表格对象。\n :type table: QTableWidget\n :param row: 当前行号。\n :type row: int\n :param search_text: 搜索文本。\n :type search_text: str\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n # 变量和实际显示匹配。先设为False为不显示\n row_contains_search_text = False\n # 只搜索键和值列\n for column in [2, 3]:\n # 获取单元格文本,并小写化,匹配搜索文本,让搜索不区分大小写\n cell_text = table.item(row, column).text().lower()\n # 找到匹配项,跳出内层循环\n if search_text in cell_text:\n row_contains_search_text = True\n break\n\n table.setRowHidden(row, not row_contains_search_text)\n\n @staticmethod\n def _reset_row_hidden_status(table: QTableWidget) -> None:\n \"\"\"\n 重置表格行的隐藏状态。\n\n :param table: 表格对象。\n :type table: QTableWidget\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n if table is not None:\n for row in range(table.rowCount()):\n table.setRowHidden(row, False)" }, { "identifier": "global_signals", "path": "ui/global_signals.py", "snippet": "class GlobalSignals(QObject):" }, { "identifier": "LangManager", "path": "ui/lang_manager.py", "snippet": "class LangManager(QObject):\n \"\"\"\n 语言管理类,用于管理和更新应用程序的语言字典。\n\n 此类继承自 QObject,可发出语言更新的信号。它通过 `get_lang_dict` 函数获取当前语言字典,并提供了更新语言的功能。\n\n :ivar _lang_dict: 当前使用的语言字典。\n :vartype _lang_dict: dict\n \"\"\"\n lang_updated = pyqtSignal()\n\n def __init__(self):\n super().__init__()\n self._lang_dict = get_lang_dict()\n\n def get_lang(self) -> Optional[Dict[str, str]]:\n \"\"\"\n 获取当前使用的语言字典的副本。\n\n :return: 当前语言字典的深拷贝。\n :rtype: Optional[Dict[str, str]]\n \"\"\"\n try:\n return 
copy.deepcopy(self._lang_dict)\n except Exception:\n logger.exception(\"Failed to retrieve language dictionary.\")\n return None\n\n def update_lang(self, new_lang: str) -> None:\n \"\"\"\n 更新当前使用的语言字典。\n\n :param new_lang: 新语言的标识符。\n :type new_lang: str\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n try:\n self._lang_dict = LANG_DICTS.get(new_lang, \"English\")\n self.lang_updated.emit()\n logger.info(f\"Language changed to {new_lang}\")\n except Exception:\n logger.exception(f\"Failed to changed language to {new_lang}\")" }, { "identifier": "message_show", "path": "ui/message_show.py", "snippet": "def message_show(message_type: str,\n text: str) -> None:\n \"\"\"\n 显示指定类型的消息框。\n\n 根据提供的消息类型和文本内容,显示相应的消息框。支持的消息类型包括 'Critical'、'Warning' 和 'Information'。\n\n :param message_type: 消息类型,支持 'Critical'、'Warning' 和 'Information'。\n :type message_type: str\n :param text: 消息框中显示的文本内容。\n :type text: str\n :return: 无返回值。\n :rtype: None\n \"\"\"\n try:\n msg_box = QMessageBox()\n msg_box.setText(text)\n msg_box.setStandardButtons(QMessageBox.Ok)\n msg_box.setWindowTitle(message_type)\n\n if message_type == 'Critical':\n msg_box.setIcon(QMessageBox.Critical)\n msg_box.setWindowIcon(QIcon(get_resource_path('media/icons8-error-26')))\n elif message_type == 'Warning':\n msg_box.setIcon(QMessageBox.Warning)\n msg_box.setWindowIcon(QIcon(get_resource_path('media/icons8-do-not-disturb-26')))\n elif message_type == 'Information':\n msg_box.setIcon(QMessageBox.Information)\n msg_box.setWindowIcon(QIcon(get_resource_path('media/icons8-about-26')))\n else:\n logger.warning(\"Invalid message type provided.\")\n\n msg_box.exec_()\n except Exception:\n logger.exception(\"An error occurred while displaying the message box\")" }, { "identifier": "TableMain", "path": "ui/table_main.py", "snippet": "class TableMain(QTableWidget):\n \"\"\"\n 主表格类,用于展示和管理数据行。\n\n 此类继承自 PyQt5 的 QTableWidget,提供了丰富的数据展示和管理功能。包括但不限于数据的展示、行的颜色标记、右键菜单功能以及快捷键支持。\n 通过与 LangManager 和 ConfigManager 的集成,支持动态语言切换和配置管理。\n\n :param lang_manager: 用于管理界面语言的 LangManager 实例。\n :type lang_manager: LangManager\n :param config_manager: 用于管理配置的 ConfigManager 实例。\n :type config_manager: ConfigManager\n\n :author: assassing\n :contact: https://github.com/hxz393\n :copyright: Copyright 2023, hxz393. 
保留所有权利。\n \"\"\"\n status_updated = pyqtSignal(str)\n filter_updated = pyqtSignal(list)\n\n def __init__(self,\n lang_manager: LangManager,\n config_manager: ConfigManager):\n super().__init__()\n self.lang_manager = lang_manager\n self.lang_manager.lang_updated.connect(self.update_lang)\n self.config_manager = config_manager\n # 实例化用到的组件\n self.actionCopy = ActionCopy(self.lang_manager, self)\n self.actionSave = ActionSave(self.lang_manager, self)\n self.actionSkip = ActionSkip(self.lang_manager, self.config_manager, self)\n self.actionUnskip = ActionUnskip(self.lang_manager, self.config_manager, self)\n # 手动连接实例化的组件信号到转发函数\n self.actionCopy.status_updated.connect(self.forward_status)\n self.actionSave.status_updated.connect(self.forward_status)\n self.actionSkip.status_updated.connect(self.forward_status)\n self.actionSkip.filter_updated.connect(self.forward_filter)\n self.actionUnskip.status_updated.connect(self.forward_status)\n self.actionUnskip.filter_updated.connect(self.forward_filter)\n self.initUI()\n\n def initUI(self) -> None:\n \"\"\"\n 初始化用户界面。\n\n 此方法负责设置表格的基本属性,如列数、表头标签、选择行为等。还包括对特定列的隐藏和宽度调整策略的设置。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n # 先运行语言更新,里面有表头定义\n self.update_lang()\n self.hidden_cols = [\"pro_time\", \"pre_time\", \"test_time\", \"dev_time\"]\n self.resize_cols = [\"name\", \"group\", \"consistency\", \"skip\"]\n # 配置表格基本属性\n self.setColumnCount(len(self.column_headers))\n self.setHorizontalHeaderLabels(self.column_headers)\n self.setEditTriggers(QTableWidget.NoEditTriggers)\n self.setSelectionBehavior(QTableWidget.SelectItems)\n # 隐藏垂直表头\n self.verticalHeader().setVisible(False)\n # 启用自动换行,没生效\n self.setWordWrap(True)\n self.setTextElideMode(Qt.ElideNone)\n # 为表头视图设置上下文菜单事件\n self.horizontalHeader().setContextMenuPolicy(Qt.CustomContextMenu)\n self.horizontalHeader().customContextMenuRequested.connect(self._header_context_menu)\n # 为表单设置上下文菜单事件\n self.setContextMenuPolicy(Qt.CustomContextMenu)\n self.customContextMenuRequested.connect(self._cell_context_menu)\n # 隐藏指定列\n [self.hideColumn(COL_INFO[i]['col']) for i in self.hidden_cols]\n # 设置表宽度策略\n self.set_header_resize()\n\n def set_header_resize(self):\n \"\"\"\n 设置表头的列宽度和调整策略。\n\n 此方法负责定义表头列的宽度调整策略和其他相关属性。它设置了表头列的默认宽度、是否可拖动以及列的自动调整策略。\n 例如,某些列被设置为根据内容自动调整宽度,而其他列则被设置为可伸缩以适应表格的大小。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n # 设置默认列宽度,列宽调整策略,列可拖动\n self.horizontalHeader().setSectionsMovable(True)\n self.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)\n self.horizontalHeader().setMinimumSectionSize(100)\n # 设置要自动调整宽度的列\n [self.horizontalHeader().setSectionResizeMode(COL_INFO[i]['col'], QHeaderView.ResizeToContents) for i in self.resize_cols]\n\n def update_lang(self) -> None:\n \"\"\"\n 更新界面语言设置。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n self.lang = self.lang_manager.get_lang()\n self.column_headers = [\n self.lang['ui.table_main_1'],\n self.lang['ui.table_main_2'],\n self.lang['ui.table_main_3'],\n self.lang['ui.dialog_settings_connection_2'],\n f\"{self.lang['ui.dialog_settings_connection_2']} {self.lang['ui.table_main_4']}\",\n self.lang['ui.dialog_settings_connection_3'],\n f\"{self.lang['ui.dialog_settings_connection_3']} {self.lang['ui.table_main_4']}\",\n self.lang['ui.dialog_settings_connection_4'],\n f\"{self.lang['ui.dialog_settings_connection_4']} {self.lang['ui.table_main_4']}\",\n self.lang['ui.dialog_settings_connection_5'],\n f\"{self.lang['ui.dialog_settings_connection_5']} {self.lang['ui.table_main_4']}\",\n self.lang['ui.table_main_5'],\n self.lang['ui.table_main_6'],\n ]\n 
# 重新应用到表头\n self.setHorizontalHeaderLabels(self.column_headers)\n # 定义数据和显示映射的字典\n consistency_status_mapping = {\n \"inconsistent\": self.lang['ui.action_start_8'],\n \"fully\": self.lang['ui.action_start_9'],\n \"partially\": self.lang['ui.action_start_10'],\n \"unknown\": self.lang['ui.action_start_13'],\n }\n skip_status_mapping = {\n \"no\": self.lang['ui.action_start_11'],\n \"yes\": self.lang['ui.action_start_12'],\n \"unknown\": self.lang['ui.action_start_13'],\n }\n for row in range(self.rowCount()):\n # 更新忽略状态文字\n self._update_item_text(row, \"skip\", skip_status_mapping)\n # 更新一致性状态文字\n self._update_item_text(row, \"consistency\", consistency_status_mapping)\n\n def _update_item_text(self,\n row: int,\n user_data_key: str,\n text_mapping: Dict[str, str]) -> None:\n \"\"\"\n 根据提供的文本映射更新指定行的项文本。\n\n 此方法用于更新表格或列表中特定行的文本。它根据用户数据键(user_data_key)获取对应行的项,然后根据提供的文本映射(text_mapping)更新该项的文本。\n\n :param row: 要更新的行索引。\n :type row: int\n :param user_data_key: 用于获取项的用户数据键。\n :type user_data_key: str\n :param text_mapping: 用户数据到文本的映射字典。\n :type text_mapping: Dict[str, str]\n\n :return: 无返回值。\n :rtype: None\n \"\"\"\n item = self.item(row, COL_INFO[user_data_key]['col'])\n if item is not None:\n user_data = item.data(Qt.UserRole)\n if user_data in text_mapping:\n item.setText(text_mapping[user_data])\n\n def keyPressEvent(self, event: QKeyEvent) -> None:\n \"\"\"\n 处理键盘事件。\n\n 此方法用于处理键盘事件,特别是复制功能的快捷键。如果按下 Ctrl+C,则复制选中的单元格内容。\n\n :param event: 键盘事件对象。\n :type event: QKeyEvent\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n if event.key() == Qt.Key_C and (event.modifiers() & Qt.ControlModifier):\n self.actionCopy.action_copy()\n else:\n super().keyPressEvent(event)\n\n def _cell_context_menu(self, pos: QPoint) -> None:\n \"\"\"\n 实现表格单元格的右键菜单功能。\n\n :param pos: 右键点击的位置。\n :type pos: QPoint\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n menu = QMenu(self)\n menu.addAction(self.actionCopy.action_copy)\n separator = QAction(menu)\n separator.setSeparator(True)\n menu.addAction(separator)\n menu.addAction(self.actionSkip.action_skip)\n menu.addAction(self.actionUnskip.action_unskip)\n sep = QAction(menu)\n sep.setSeparator(True)\n menu.addAction(sep)\n menu.addAction(self.actionSave.action_save)\n menu.exec_(self.viewport().mapToGlobal(pos))\n\n def _header_context_menu(self, pos: QPoint) -> None:\n \"\"\"\n 实现表头的右键菜单功能。\n\n :param pos: 右键点击的位置。\n :type pos: QPoint\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n menu = QMenu(self)\n # 动态创建一个菜单项,用于隐藏/显示列\n for index in range(self.columnCount()):\n column_name = self.horizontalHeaderItem(index).text()\n action = menu.addAction(f\"{column_name}\")\n action.setCheckable(True)\n action.setChecked(not self.isColumnHidden(index))\n action.setData(index)\n action.triggered.connect(self._toggle_column_visibility)\n # 在鼠标右键点击位置显示菜单\n menu.exec_(self.horizontalHeader().viewport().mapToGlobal(pos))\n\n def _toggle_column_visibility(self) -> None:\n \"\"\"\n 根据用户选择,切换列的可见性。\n\n 此方法用于根据用户在上下文菜单中的选择,显示或隐藏特定的列。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n action = self.sender()\n if isinstance(action, QAction):\n column_index = action.data()\n if action.isChecked():\n self.showColumn(column_index)\n else:\n self.hideColumn(column_index)\n\n def add_row(self, data: List[List[str]]) -> None:\n \"\"\"\n 向表格中添加一行数据。\n\n :param data: 要添加的数据列表,每个元素是一个列表,第一个元素代表显示的字符串,第二个元素代表附加数据。\n :type data: List[List[str]]\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n row_position = 0\n try:\n # 获取最后行数\n row_position = self.rowCount()\n # 插入最后一行\n self.insertRow(row_position)\n # 插入单元格数据\n 
self._fill_row_data(row_position, data)\n except Exception:\n logger.exception(f\"Error occurred while adding a new row at position {row_position}\")\n self.removeRow(row_position)\n\n def _fill_row_data(self,\n row_position: int,\n data: List[List[str]]) -> None:\n \"\"\"\n 填充指定行的数据。\n\n :param row_position: 行位置\n :param data: 行数据\n :type row_position: int\n :type data: List[List[str]]\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n for column, (display_text, user_data) in enumerate(data):\n # 默认设置显示字符串,也叫 Qt.DisplayRole。获取方法item.text() 或 item.data(Qt.DisplayRole)\n item = QTableWidgetItem(str(display_text))\n # 设置实际数据,也叫 Qt.UserRole。获取方法 item.data(Qt.UserRole)\n item.setData(Qt.UserRole, user_data)\n # 设置单元格不可编辑状态\n item.setFlags(item.flags() & ~Qt.ItemIsEditable)\n # 正常表格插入方法\n self.setItem(row_position, column, item)\n\n @log_time\n def apply_color_to_table(self, rows: List[int] = None) -> None:\n \"\"\"\n 对整个表格进行着色。通常只有初始化时才不带rows参数,以应用到整表。\n\n :param rows: 可选,要应用颜色的行号列表。\n :type rows: List[int], optional\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n color_switch = self.config_manager.get_config_main().get('color_set', 'ON')\n if color_switch == 'OFF':\n return\n\n if rows is None or not isinstance(rows, list):\n rows = range(self.rowCount())\n\n try:\n for row in rows:\n # 不给隐藏行设置颜色\n if self.isRowHidden(row):\n continue\n\n self._process_row_for_color(row)\n except Exception:\n logger.exception(\"Exception in apply_color_to_table method\")\n self.status_updated.emit(self.lang['label_status_error'])\n\n def _process_row_for_color(self, row: int) -> None:\n \"\"\"\n 根据一致性、跳过状态和是否为空值给单行应用颜色。\n\n :param row: 行号,对每行进行颜色处理。\n :type row: int\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n consistency_data = self.item(row, COL_INFO['consistency']['col']).data(Qt.UserRole)\n skip_data = self.item(row, COL_INFO['skip']['col']).data(Qt.UserRole)\n # 忽略状态为是时设置颜色\n if skip_data == 'yes':\n self.apply_color(row, COLOR_SKIP)\n return\n\n # 根据一致性值设置颜色\n if consistency_data == 'fully':\n self.apply_color(row, COLOR_CONSISTENCY_FULLY)\n elif consistency_data == 'partially':\n self.apply_color(row, COLOR_CONSISTENCY_PARTIALLY)\n else:\n self.apply_color(row, COLOR_DEFAULT)\n\n # 遍历指定列检查空值,并赋予颜色\n for column in range(self.columnCount()):\n # 不给隐藏列设置颜色\n if not self.isColumnHidden(column):\n if self.item(row, column).text() == 'None':\n self.apply_color(row, COLOR_EMPTY, column)\n\n def apply_color(self,\n row: int,\n color: str,\n column: Optional[int] = None) -> None:\n \"\"\"\n 为指定的行或单元格应用颜色。\n\n :param row: 要着色的行索引。\n :type row: int\n :param color: 要应用的颜色。\n :type color: str\n :param column: 可选,指定要着色的列索引,如果未指定,则对整行应用颜色。\n :type column: int, optional\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n try:\n color_brush = QBrush(QColor(color))\n if column is not None:\n self.item(row, column).setBackground(color_brush)\n else:\n for col in range(self.columnCount()):\n # 不给隐藏列设置颜色\n if not self.isColumnHidden(col):\n self.item(row, col).setBackground(color_brush)\n except Exception:\n logger.exception(\"Error occurred while applying color to a cell\")\n self.status_updated.emit(self.lang['label_status_error'])\n\n def clear(self) -> None:\n \"\"\"\n 清空表格中的所有行。\n\n 此方法用于清除表格中的所有数据,通常在数据更新或重置时使用。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n try:\n # 禁用更新以提高性能\n self.setUpdatesEnabled(False)\n # 首先清除所有单元格的内容\n self.clearContents()\n # 将行数设置为0,从而删除所有行\n self.setRowCount(0)\n except Exception:\n logger.exception(\"Error occurred while clearing the table.\")\n self.status_updated.emit(self.lang['label_status_error'])\n 
finally:\n # 确保即使发生错误也要重新启用更新\n self.setUpdatesEnabled(True)\n\n def forward_status(self, message: str) -> None:\n \"\"\"\n 用于转发状态信号。\n\n :param message: 要转发的消息。\n :type message: str\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n self.status_updated.emit(message)\n\n def forward_filter(self, rows: List[int]) -> None:\n \"\"\"\n 用于转发过滤信号。\n\n :param rows: 要转发的行列表。\n :type rows: List[int]\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n self.filter_updated.emit(rows)\n\n def get_table_data(self) -> Dict[int, Dict[str, str]]:\n \"\"\"\n 用于获取表格所有数据。\n\n :rtype: Dict[int, Dict[str, str]]\n :return: 返回嵌套字典。键为行号,值为字典,字典中键为列标题,值为内容。类似于:{882: {'服务': 'web', '分组': 'application'}, 883: {'服务': 'web', '分组': 'application'}}\n \"\"\"\n return {row: {self.horizontalHeaderItem(col).text(): self.item(row, col).data(Qt.UserRole)\n for col in range(self.columnCount())}\n for row in range(self.rowCount())}" } ]
import logging from typing import Dict, Optional, List from PyQt5.QtCore import QObject, pyqtSignal from PyQt5.QtGui import QIcon from PyQt5.QtWidgets import QAction from lib.get_resource_path import get_resource_path from lib.log_time import log_time from ui.config_manager import ConfigManager from ui.dialog_comparison import DialogComparison from ui.global_signals import global_signals from ui.lang_manager import LangManager from ui.message_show import message_show from ui.table_main import TableMain
token_num: 12,500
""" 本文件包含用于处理和比较配置数据的类和函数。 该模块主要包含 `ActionCompare` 类,用于在用户界面中处理数据比较的逻辑。该类提供了对比配置数据、更新界面语言、重组数据等功能,方便用户进行环境配置的对比分析。 :author: assassing :contact: https://github.com/hxz393 :copyright: Copyright 2023, hxz393. 保留所有权利。 """ logger = logging.getLogger(__name__) class ActionCompare(QObject): """ 提供数据比较功能的类。 该类负责处理用户界面中的数据对比逻辑,包括初始化UI组件、更新语言设置、执行数据对比等功能。它还负责处理各种事件和信号,并更新用户界面状态。 :param lang_manager: 语言管理器实例,用于处理界面语言更新。 :type lang_manager: LangManager :param config_manager: 配置管理器,用于获取网络测试相关配置。 :type config_manager: ConfigManager :param table: 主界面表格实例,提供数据获取和显示功能。 :type table: TableMain """ status_updated = pyqtSignal(str) def __init__(self, lang_manager: LangManager, config_manager: ConfigManager, table: TableMain): super().__init__() self.lang_manager = lang_manager self.lang_manager.lang_updated.connect(self.update_lang) self.config_manager = config_manager self.table = table self.initUI() def initUI(self) -> None: """ 初始化用户界面组件。 :rtype: None :return: 无返回值。 """ self.action_compare = QAction(QIcon(get_resource_path('media/icons8-diff-files-26')), 'Compare') self.action_compare.setShortcut('F8') # 为了记录运行时间,使用匿名函数 self.action_compare.triggered.connect(lambda checked=False: self.compare()) self.update_lang() def update_lang(self) -> None: """ 更新界面语言设置。 :rtype: None :return: 无返回值。 """ self.lang = self.lang_manager.get_lang() self.action_compare.setText(self.lang['ui.action_compare_1']) self.action_compare.setStatusTip(self.lang['ui.action_compare_2']) @log_time def compare(self) -> None: """ 执行数据对比操作。 该方法首先从表格获取原始数据,然后对数据进行重组和对比分析。最终,它将对比结果展示在对话框中。 :rtype: None :return: 无返回值。 """ try: # 获取原表格数据到字典。 original_data = self.table.get_table_data() if not original_data: logger.warning("No data available in the table for comparison.") message_show('Information', self.lang['ui.action_compare_4']) return # 对原表格数据进行重新整理分组。 new_data = self._reorganize_data(original_data) if not new_data: logger.error("Data reorganization failed.") self.status_updated.emit(self.lang['label_status_error']) return # 对整理分组后的数据进行对比。 result = self._compare_environments(new_data) if not result: logger.error("Environment comparison failed.") self.status_updated.emit(self.lang['label_status_error']) return # 打开带表格组件的对话框,展示结果。 self.dialog_comparison = DialogComparison(self.lang_manager, self.config_manager, result) self.dialog_comparison.status_updated.connect(self.forward_status) self.dialog_comparison.show() # 连接全局信号,主窗口关闭时一并关闭。
""" 本文件包含用于处理和比较配置数据的类和函数。 该模块主要包含 `ActionCompare` 类,用于在用户界面中处理数据比较的逻辑。该类提供了对比配置数据、更新界面语言、重组数据等功能,方便用户进行环境配置的对比分析。 :author: assassing :contact: https://github.com/hxz393 :copyright: Copyright 2023, hxz393. 保留所有权利。 """ logger = logging.getLogger(__name__) class ActionCompare(QObject): """ 提供数据比较功能的类。 该类负责处理用户界面中的数据对比逻辑,包括初始化UI组件、更新语言设置、执行数据对比等功能。它还负责处理各种事件和信号,并更新用户界面状态。 :param lang_manager: 语言管理器实例,用于处理界面语言更新。 :type lang_manager: LangManager :param config_manager: 配置管理器,用于获取网络测试相关配置。 :type config_manager: ConfigManager :param table: 主界面表格实例,提供数据获取和显示功能。 :type table: TableMain """ status_updated = pyqtSignal(str) def __init__(self, lang_manager: LangManager, config_manager: ConfigManager, table: TableMain): super().__init__() self.lang_manager = lang_manager self.lang_manager.lang_updated.connect(self.update_lang) self.config_manager = config_manager self.table = table self.initUI() def initUI(self) -> None: """ 初始化用户界面组件。 :rtype: None :return: 无返回值。 """ self.action_compare = QAction(QIcon(get_resource_path('media/icons8-diff-files-26')), 'Compare') self.action_compare.setShortcut('F8') # 为了记录运行时间,使用匿名函数 self.action_compare.triggered.connect(lambda checked=False: self.compare()) self.update_lang() def update_lang(self) -> None: """ 更新界面语言设置。 :rtype: None :return: 无返回值。 """ self.lang = self.lang_manager.get_lang() self.action_compare.setText(self.lang['ui.action_compare_1']) self.action_compare.setStatusTip(self.lang['ui.action_compare_2']) @log_time def compare(self) -> None: """ 执行数据对比操作。 该方法首先从表格获取原始数据,然后对数据进行重组和对比分析。最终,它将对比结果展示在对话框中。 :rtype: None :return: 无返回值。 """ try: # 获取原表格数据到字典。 original_data = self.table.get_table_data() if not original_data: logger.warning("No data available in the table for comparison.") message_show('Information', self.lang['ui.action_compare_4']) return # 对原表格数据进行重新整理分组。 new_data = self._reorganize_data(original_data) if not new_data: logger.error("Data reorganization failed.") self.status_updated.emit(self.lang['label_status_error']) return # 对整理分组后的数据进行对比。 result = self._compare_environments(new_data) if not result: logger.error("Environment comparison failed.") self.status_updated.emit(self.lang['label_status_error']) return # 打开带表格组件的对话框,展示结果。 self.dialog_comparison = DialogComparison(self.lang_manager, self.config_manager, result) self.dialog_comparison.status_updated.connect(self.forward_status) self.dialog_comparison.show() # 连接全局信号,主窗口关闭时一并关闭。
next_line: global_signals.close_all.connect(self.close_dialog)
gold_snippet_index: 4
created_at: 2023-11-07 01:02:38+00:00
level: 16k
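The `_search_process` and `_reset_row_hidden_status` snippets in the sample above implement row filtering for a QTableWidget: lowercase the search text, check only the key and value columns, and hide every row with no match. A minimal standalone sketch of that pattern follows (illustrative only; the helper name and demo data are hypothetical, and it assumes PyQt5 with a table whose columns 2 and 3 hold keys and values):

import sys

from PyQt5.QtWidgets import QApplication, QTableWidget, QTableWidgetItem


def filter_rows(table: QTableWidget, search_text: str, columns=(2, 3)) -> None:
    # Hide rows whose cells in `columns` do not contain `search_text`, case-insensitively.
    needle = search_text.strip().lower()
    for row in range(table.rowCount()):
        if not needle:
            # An empty search resets every row to visible, mirroring _reset_row_hidden_status.
            table.setRowHidden(row, False)
            continue
        match = any(
            needle in (table.item(row, col).text().lower() if table.item(row, col) else "")
            for col in columns
        )
        table.setRowHidden(row, not match)


if __name__ == "__main__":
    app = QApplication(sys.argv)
    table = QTableWidget(2, 4)
    table.setItem(0, 2, QTableWidgetItem("app.timeout"))
    table.setItem(0, 3, QTableWidgetItem("30"))
    table.setItem(1, 2, QTableWidgetItem("db.host"))
    table.setItem(1, 3, QTableWidgetItem("localhost"))
    filter_rows(table, "db")  # hides row 0, keeps row 1 visible
    print([table.isRowHidden(r) for r in range(table.rowCount())])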
repo_name: pytorch-labs/ao
file_path: test/test.py
[ { "identifier": "DynamicallyPerAxisQuantizedLinear", "path": "torchao/quantization/dynamic_quant.py", "snippet": "class DynamicallyPerAxisQuantizedLinear(torch.nn.Linear):\n \"\"\"\n This class is a replacement for `torch.nn.Linear`. It implements a\n quantized matmul using int8 dynamic symmetric per-token activation,\n and int8 symmetric per-channel weight quantization\n \"\"\"\n\n def __init__(\n self,\n in_features: int,\n out_features: int,\n bias: bool = True,\n ) -> None:\n super().__init__(in_features, out_features, bias)\n\n def forward(self, X: torch.Tensor, *args, **kwargs) -> torch.Tensor:\n \"\"\"\n Performs the forward pass of the quantized linear layer which consists\n of int8 dynamic symmetric per-token activation and int8 symmetric per-channel weight\n quantization\n\n Args:\n X (torch.Tensor): The input floating point tensor to the quantized linear layer.\n\n Returns:\n torch.Tensor: The output floating point tensor after the quantized matmul and rescale.\n\n \"\"\"\n\n Y = quant_int8_dynamic_per_token_linear(\n X, self.W_int_repr_t, self.W_scales, self.bias, X.dtype\n )\n return Y\n\n @classmethod\n def from_float(\n cls, mod: torch.nn.Linear\n ) -> \"DynamicallyPerAxisQuantizedLinear\":\n \"\"\"\n Converts a `mod` of class `torch.nn.Linear` to the\n `DynamicallyPerAxisQuantizedLinear` class\n\n Args:\n mod (torch.nn.Linear): The original `torch.nn.Linear` module to convert.\n\n Returns:\n DynamicallyPerAxisQuantizedLinear: The converted quantized linear module.\n\n \"\"\"\n\n # create the new module with a toy size to ensure initialization is fast\n fake_in_features, fake_out_features = 8, 8\n new_mod = cls(\n fake_in_features,\n fake_out_features,\n bias=mod.bias is not None,\n )\n new_mod.in_features = mod.in_features\n new_mod.out_features = mod.out_features\n W_int_repr, W_scales, _W_zps = dynamically_quantize_per_channel(\n mod.weight, -128, 127, torch.int8\n )\n new_mod.register_buffer(\"W_int_repr_t\", W_int_repr.contiguous().t())\n new_mod.W_scales = nn.Parameter(W_scales)\n new_mod.bias = mod.bias\n del new_mod.weight\n\n device_to_use = next(mod.parameters()).device\n new_mod.to(device_to_use)\n return new_mod" }, { "identifier": "apply_dynamic_quant", "path": "torchao/quantization/quant_api.py", "snippet": "def apply_dynamic_quant(model, filter_fn=None):\n \"\"\"\n Applies dynamic symmetric per-token activation and per-channel weight\n quantization to all linear layers in the given model using\n module swaps.\n \"\"\"\n _replace_with_custom_fn_if_matches_filter(\n model,\n lambda mod: DynamicallyPerAxisQuantizedLinear.from_float(mod),\n _is_linear if filter_fn is None else filter_fn,\n )" }, { "identifier": "apply_weight_only_int8_quant", "path": "torchao/quantization/quant_api.py", "snippet": "def apply_weight_only_int8_quant(model, filter_fn=None):\n \"\"\"\n Applies weight-only symmetric per-channel int8 quantization to all linear layers\n in the given model using module swaps.\n \"\"\"\n _replace_with_custom_fn_if_matches_filter(\n model,\n WeightOnlyInt8QuantLinear.from_float,\n _is_linear if filter_fn is None else filter_fn,\n )" }, { "identifier": "change_linear_weights_to_int8_dqtensors", "path": "torchao/quantization/quant_api.py", "snippet": "def change_linear_weights_to_int8_dqtensors(model, filter_fn=None):\n \"\"\"\n Converts all linear weight tensors to the `Int8DynamicallyQuantizedLinearWeight`\n Tensor subclass, effectively applying the same form of quantization\n as apply_dynamic_quant while not modifying the linear modules.\n \"\"\"\n if 
filter_fn is None:\n filter_fn = (\n lambda *args:\n _is_linear(*args) and\n _in_features_greater_than_16(*args)\n )\n\n _replace_with_custom_fn_if_matches_filter(\n model,\n _get_subclass_inserter(Int8DynamicallyQuantizedLinearWeight),\n filter_fn\n )" }, { "identifier": "change_linear_weights_to_int8_woqtensors", "path": "torchao/quantization/quant_api.py", "snippet": "def change_linear_weights_to_int8_woqtensors(model, filter_fn=None):\n \"\"\"\n Converts all linear weight tensors to the\n `Int8WeightOnlyQuantizedLinearWeight` tensor subclass,\n effectively applying the same form of quantization\n as apply_dynamic_quant while not modifying the linear modules.\n \"\"\"\n _replace_with_custom_fn_if_matches_filter(\n model,\n _get_subclass_inserter(Int8WeightOnlyQuantizedLinearWeight),\n _is_linear if filter_fn is None else filter_fn,\n )" }, { "identifier": "change_linear_weights_to_int4_woqtensors", "path": "torchao/quantization/quant_api.py", "snippet": "def change_linear_weights_to_int4_woqtensors(model, **kwargs):\n \"\"\"\n Converts all linear weight tensors to the\n `Int4WeightOnlyQuantizedLinearWeight` tensor subclass,\n effectively applying the same form of quantization\n as apply_dynamic_quant while not modifying the linear modules.\n \"\"\"\n filter_fn = kwargs.pop(\"filter_fn\", _is_linear)\n\n _replace_with_custom_fn_if_matches_filter(\n model,\n _get_subclass_inserter(Int4WeightOnlyQuantizedLinearWeight, **kwargs),\n filter_fn,\n )" }, { "identifier": "_replace_with_custom_fn_if_matches_filter", "path": "torchao/quantization/quant_api.py", "snippet": "def _replace_with_custom_fn_if_matches_filter(\n model, replacement_fn, filter_fn, cur_fqn=\"\"\n) -> None:\n \"\"\"\n For each `child` in `model`, replaces it with `replacement_fn(child)`\n if `filter_fn(child)` is `True`\n \"\"\"\n if filter_fn(model, cur_fqn[:-1]):\n model = replacement_fn(model)\n return model\n else:\n for name, child in model.named_children():\n new_child = _replace_with_custom_fn_if_matches_filter(\n child, replacement_fn, filter_fn, f\"{cur_fqn}{name}.\"\n )\n if new_child is not child:\n setattr(model, name, new_child)\n return model" }, { "identifier": "dequantize_per_channel", "path": "torchao/quantization/quant_primitives.py", "snippet": "def dequantize_per_channel(int_repr, scales, zero_points, out_dtype=torch.float32):\n # assumes axis is 0\n y = int_repr.transpose(0, 1)\n y = y.to(out_dtype)\n y = y - zero_points\n y = y * scales\n y = y.transpose(0, 1)\n return y" }, { "identifier": "dequantize_per_tensor", "path": "torchao/quantization/quant_primitives.py", "snippet": "def dequantize_per_tensor(int_repr, scale, zero_point, out_dtype=torch.float32):\n y = int_repr.to(out_dtype)\n if zero_point is not None:\n y -= zero_point\n return y * scale" }, { "identifier": "dynamically_quantize_per_channel", "path": "torchao/quantization/quant_primitives.py", "snippet": "def dynamically_quantize_per_channel(x, quant_min, quant_max, target_dtype):\n # assumes symmetric quantization\n # assumes axis == 0\n # assumes dense memory format\n # TODO(future): relax ^ as needed\n\n # default setup for affine quantization of activations\n eps = torch.finfo(torch.float32).eps\n\n # get min and max\n min_val, max_val = torch.aminmax(x, dim=1)\n\n # calculate scale and zero point based on min and max\n # reference: https://fburl.com/code/srbiybme\n min_val_neg = torch.min(min_val, torch.zeros_like(min_val))\n max_val_pos = torch.max(max_val, torch.zeros_like(max_val))\n device = min_val_neg.device\n\n # reference: 
https://fburl.com/code/4wll53rk\n max_val_pos = torch.max(-min_val_neg, max_val_pos)\n scale = max_val_pos / (float(quant_max - quant_min) / 2)\n # ensure scale is the same dtype as the original tensor\n scale = torch.clamp(scale, min=eps).to(x.dtype)\n zero_point = torch.zeros(min_val_neg.size(), dtype=torch.int64, device=device)\n\n # quantize based on qmin/qmax/scale/zp\n # reference: torch/ao/quantization/fx/_decomposed.py?lines=63\n x_div = x.transpose(0, 1) / scale\n x_round = torch.round(x_div)\n x_zp = x_round + zero_point\n x_zp = x_zp.transpose(0, 1)\n quant = torch.clamp(x_zp, quant_min, quant_max).to(target_dtype)\n\n return quant, scale, zero_point" }, { "identifier": "dynamically_quantize_per_tensor", "path": "torchao/quantization/quant_primitives.py", "snippet": "def dynamically_quantize_per_tensor(\n x,\n quant_min,\n quant_max,\n target_dtype,\n qscheme=torch.per_tensor_affine, # for now, reuse existing qscheme enum\n):\n # assumes affine quantization\n\n # default setup for affine quantization of activations\n eps = torch.finfo(torch.float32).eps\n\n if qscheme == torch.per_tensor_affine:\n # get min and max\n # TODO(future): make torch.aminmax work on cpu-half\n # min_val, max_val = torch.aminmax(x)\n min_val = torch.min(x)\n max_val = torch.max(x)\n\n # calculate scale and zero point based on min and max\n # reference: https://fburl.com/code/srbiybme\n min_val_neg = torch.min(min_val, torch.zeros_like(min_val))\n max_val_pos = torch.max(max_val, torch.zeros_like(max_val))\n device = min_val_neg.device\n\n scale = (max_val_pos - min_val_neg) / float(quant_max - quant_min)\n # TODO(future): make torch.clamp with scalar work on cpu-half\n scale = torch.clamp(scale, min=eps).reshape(1)\n zero_point = quant_min - torch.round(min_val_neg / scale).to(torch.int)\n zero_point = torch.clamp(zero_point, quant_min, quant_max)\n\n # quantize based on qmin/qmax/scale/zp\n # reference: torch/ao/quantization/fx/_decomposed.py?lines=63\n quant = torch.clamp(\n torch.round(x / scale) + zero_point, quant_min, quant_max\n ).to(target_dtype)\n\n else:\n assert qscheme == torch.per_tensor_symmetric, f\"unsupported qscheme {qscheme}\"\n # assert quant_min == -1 * quant_max, \"unsupported quant_min/quant_max\"\n amax = torch.max(torch.abs(x))\n scale = amax / (float(quant_max - quant_min) / 2)\n scale = torch.clamp(scale, min=eps).reshape(1)\n quant = torch.clamp(torch.round(x / scale), quant_min, quant_max).to(\n target_dtype\n )\n # do not create a tensor for zero_point as this is expensive\n zero_point = None\n\n return quant, scale, zero_point" }, { "identifier": "quant_int8_dynamic_linear", "path": "torchao/quantization/quant_primitives.py", "snippet": "def quant_int8_dynamic_linear(\n x,\n x_quant_min,\n x_quant_max,\n x_q_dtype,\n w_vals_int8_t,\n w_scales,\n w_vals_int8_t_sums_int64,\n bias,\n out_dtype=torch.float32,\n):\n # like F.linear, but with int8 dynamic quantization of activation,\n # and a quantized weight\n x_vals_int8, x_scale, x_zp = dynamically_quantize_per_tensor(\n x, x_quant_min, x_quant_max, x_q_dtype\n )\n # w_vals_int8_t_sums_int64 = w_vals_int8_t.sum(dim=0)\n mm_out = quant_int8_matmul(\n x_vals_int8,\n x_scale,\n x_zp,\n w_vals_int8_t,\n w_vals_int8_t_sums_int64,\n w_scales,\n out_dtype,\n )\n if bias is not None:\n mm_out += bias\n return mm_out" }, { "identifier": "quant_int8_dynamic_per_token_linear", "path": "torchao/quantization/quant_primitives.py", "snippet": "def quant_int8_dynamic_per_token_linear(\n x,\n w_vals_int8_t,\n w_scales,\n bias,\n 
out_dtype,\n):\n # like F.linear, but with int8 dynamic quantization of activation,\n # and a quantized weight\n x_vals_int8, x_scales = quantize_activation_per_token_absmax(x)\n mm_out = quant_int8_per_token_matmul(\n x_vals_int8, x_scales, w_vals_int8_t, w_scales, out_dtype\n )\n if bias is not None:\n mm_out += bias\n return mm_out" }, { "identifier": "quantize_activation_per_token_absmax", "path": "torchao/quantization/quant_primitives.py", "snippet": "def quantize_activation_per_token_absmax(t):\n n_bits = 8\n # if the shape of t is [B, N, K], the shape of scales will be [B, N, 1]\n\n scales = t.abs().amax(dim=-1, keepdim=True)\n if scales.dtype == torch.float16:\n scales = (\n scales.float()\n ) # want float scales to avoid overflows for fp16, (bf16 has wide enough range)\n q_max = 2 ** (n_bits - 1) - 1\n scales = scales.clamp(min=1e-5).div(q_max)\n # Note: the original smoothquant does not clamp to qmin/qmax here,\n # but some of the tests with bfloat16 ended up with a flipped sign\n # if we don't clamp. TODO(future) look into this further.\n t = torch.round(t / scales).clamp(-127, 127).to(torch.int8)\n return t, scales" }, { "identifier": "safe_int_mm", "path": "torchao/quantization/quant_primitives.py", "snippet": "def safe_int_mm(input: torch.Tensor, mat2: torch.Tensor) -> torch.Tensor:\n r\"\"\"\n This function wraps torch._int_mm and avoids several undesirable behaviors of the function for certain inputs while still\n returning correct results and being torch.compiled in a performant way.\n\n Assumes both tensors have dimension of 2.\n\n Note: no error checking for torch.compiled path, if input.shape = [i, j] and j<=16 then the triton kernel\n will error.\n\n Args:\n input (Tensor, int8): the first tensor to be multiplied\n mat2 (Tensor, int8): the second tensor to be multiplied\n\n Return:\n out (Tensor, int32): the result of the matmul with device matching that of the inputs\n \"\"\"\n\n # torch.compile path\n if dynamo_is_compiling() or \"FakeTensor\" in input.__repr__():\n return out_dtype(torch.ops.aten.mm.default, torch.int32, input, mat2)\n\n # error checking for cublas path\n assert (\n mat2.device == input.device\n ), f\"need both tensors to be on the same device but got {mat2.device} and {input.device}\"\n device_cpu = \"cpu\" in [mat2.device.type, input.device.type]\n # with input.shape = [i,j] and mat2.shape = [j,k]\n i_is_strictly_greater_than_16 = input.shape[0] > 16\n j_is_nonzero_multiple_of_8 = (input.shape[1] % 8 == 0) and (input.shape[1] > 0)\n k_is_nonzero_multiple_of_8 = (mat2.shape[1] % 8 == 0) and (mat2.shape[1] > 0)\n bad_dimensions_for_cublas = not (\n i_is_strictly_greater_than_16\n and j_is_nonzero_multiple_of_8\n and k_is_nonzero_multiple_of_8\n )\n\n if device_cpu or bad_dimensions_for_cublas:\n # fallback path\n return torch.matmul(input.cpu().to(torch.int32), mat2.cpu().to(torch.int32)).to(\n input.device.type\n )\n\n # cublas paths\n if not mat2.is_contiguous(): # silently gives incorrect result without this\n mat2 = mat2.contiguous()\n if (not input.is_contiguous()) and (\n input.shape[0] % 8 != 0\n ): # gives cryptic error without this\n input = (\n input.contiguous()\n ) # (it seems the transpose makes cublas check the above j constraint on i)\n return out_dtype(torch.ops.aten.mm.default, torch.int32, input, mat2)" }, { "identifier": "get_scale", "path": "torchao/quantization/smoothquant.py", "snippet": "def get_scale(X_absmax, W_absmax, alpha=0.5):\n \"\"\"\n Calculate the scale based on abs(max(X)), abs(max(W)) and alpha\n If X is of 
dimension `b*n*k` and W is dimension `k*m`, the returned\n scale is of dimension `k`.\n Note: X_absmax is calculated outside of this function because we\n need to keep a running version of it during calibration. W_absmax\n is calculated outside of this function for consistency with X_absmax.\n \"\"\"\n X_pow = torch.pow(X_absmax, alpha)\n W_pow = torch.pow(W_absmax, 1.0 - alpha)\n div = X_pow / W_pow\n return div.reshape(-1)" }, { "identifier": "smooth_fq_linear_to_inference", "path": "torchao/quantization/smoothquant.py", "snippet": "def smooth_fq_linear_to_inference(model, debug_skip_calibration=False) -> None:\n for _, mod in model.named_modules():\n if isinstance(mod, tuple(source_cls_to_target_cls.values())):\n if debug_skip_calibration:\n mod.set_debug_x_absmax()\n mod.to_inference()" }, { "identifier": "SmoothFakeDynamicallyQuantizedLinear", "path": "torchao/quantization/smoothquant.py", "snippet": "class SmoothFakeDynamicallyQuantizedLinear(SmoothFakeDynQuantMixin, torch.nn.Linear):\n \"\"\"\n This is a replacement for `torch.nn.Linear` which implements dynamic per-token\n activation quantization and dynamic per-channel weight quantization based on\n Smoothquant scaling.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n alpha = kwargs.pop(\"alpha\")\n super().__init__(*args, **kwargs)\n self.init_smoothquant_variables(alpha)\n\n def forward(self, X, *args, **kwargs):\n if self.calibrating:\n self.update_x_running_abs_max(X)\n Y = F.linear(X, self.weight, self.bias)\n else:\n if not self.debug_skip_scaling:\n # Ideally this would be fused into preceding layers\n # but in practice torch.compile fuses it with other\n # ops so the slowdown is minimal\n X = X / self.smooth_scale\n W_int_repr_t = (\n self.W_int_repr if self.store_w_int_repr_t else self.W_int_repr.t()\n )\n Y = quant_int8_dynamic_per_token_linear(\n X, W_int_repr_t, self.W_scales, self.bias, X.dtype\n )\n return Y\n\n @classmethod\n def from_float(cls, mod, alpha=0.5):\n \"\"\"\n Converts a `mod` of class `torch.nn.Linear` to the smooth fake quantized\n version of it. 
Note: requires calibration.\n \"\"\"\n # create the new module with a toy size to ensure initialization is fast\n fake_in_features, fake_out_features = 8, 8\n new_mod = cls(\n fake_in_features, fake_out_features, bias=mod.bias is not None, alpha=alpha\n )\n new_mod.in_features = mod.in_features\n new_mod.out_features = mod.out_features\n new_mod.weight = mod.weight\n new_mod.bias = mod.bias\n # TODO: test when creation is on cuda\n device_to_use = next(mod.parameters()).device\n new_mod.to(device_to_use)\n return new_mod\n\n def to_inference(self):\n \"\"\"\n Calculates the smoothquant scale based on calibration\n in preparation for inference\n \"\"\"\n assert self.x_running_abs_max is not None, \"no calibration data found\"\n self.calibrating = False\n self.smooth_scale = get_scale(\n self.x_running_abs_max,\n torch.max(torch.abs(self.weight.transpose(0, 1)), dim=1).values,\n alpha=self.alpha,\n )\n self.fold_weight()\n\n def set_debug_x_absmax(self):\n w_absmax = torch.max(torch.abs(self.weight.transpose(0, 1)), dim=1).values\n self.x_running_abs_max = w_absmax" }, { "identifier": "swap_linear_with_smooth_fq_linear", "path": "torchao/quantization/smoothquant.py", "snippet": "def swap_linear_with_smooth_fq_linear(\n model, skip_fqn_list=None, cur_fqn=\"\", alpha=0.5\n) -> None:\n\n name_to_child = dict(model.named_children())\n for name, child in name_to_child.items():\n if cur_fqn == \"\":\n new_fqn = name\n else:\n new_fqn = f\"{cur_fqn}.{name}\"\n if ((skip_fqn_list is None) or (new_fqn not in skip_fqn_list)) and (\n type(child) in source_cls_to_target_cls.keys()\n ):\n target_cls = source_cls_to_target_cls[type(child)]\n new_child = target_cls.from_float(child, alpha=alpha)\n setattr(model, name, new_child)\n else:\n swap_linear_with_smooth_fq_linear(child, skip_fqn_list, new_fqn, alpha)" }, { "identifier": "Int8DynamicallyQuantizedLinearWeight", "path": "torchao/quantization/subclass.py", "snippet": "class Int8DynamicallyQuantizedLinearWeight(QuantizedLinearWeightBase):\n \"\"\"\n A Tensor subclass that when applied to a weight used in a linear op/module, changes the\n linear op to a dynamically quantized linear op with symmetric per-token and per-channel\n quantization on the activation and weight respectively.\n \"\"\"\n\n @staticmethod\n def __new__(cls, int_data, q_scales, transposed, shape, **kwargs):\n kwargs[\"dtype\"] = kwargs.get(\"dtype\", q_scales.dtype)\n return super().__new__(cls, int_data, transposed, shape, **kwargs) # type: ignore[attr-defined]\n\n def __init__(self, int_data, q_scales, transposed, shape, **kwargs):\n self.q_scales = q_scales\n super().__init__(int_data, transposed)\n\n @staticmethod\n def _quantized_op(act_mat, w_qtensor, bias):\n return quant_int8_dynamic_per_token_linear(\n act_mat, w_qtensor.int_data, w_qtensor.q_scales, bias, act_mat.dtype\n )\n\n def dequantize(self, dtype=None):\n \"\"\"\n Obtain the dequantized version of the quantized tensor subclass\n \"\"\"\n dq_t = dequantize_per_channel(\n self.int_data.t(), self.q_scales, 0, self.dtype if dtype is None else dtype\n ).to(self.dtype)\n # data was transposed to dequantize so make sure shape is correct\n return dq_t if not self.transposed else dq_t.t()\n\n def int_repr(self):\n \"\"\"\n Get the internal integer representation of the quantized tensor\n \"\"\"\n return self.int_data if self.transposed else self.int_data.t()\n\n def q_params(self):\n \"\"\"\n Get the quantization scales for the quantized tensor\n \"\"\"\n return {\"q_scales\": self.q_scales}\n\n def to(self, *args, 
**kwargs):\n kwargs = self._get_to_kwargs(*args, **kwargs)\n return self.__class__(\n self.int_data.to(kwargs[\"device\"]),\n self.q_scales.to(kwargs[\"device\"]),\n self.transposed,\n self.shape,\n **kwargs,\n )\n\n def _apply_fn_to_data(self, fn):\n return self.__class__(\n fn(self.int_data), fn(self.q_scales), self.transposed, self.shape, dtype=self.dtype\n )\n\n def _change_shape(self, shape):\n return self.__class__(\n self.int_data, self.q_scales, self.transposed, shape, dtype=self.dtype\n )\n\n def __tensor_flatten__(self):\n return [\"int_data\", \"q_scales\"], [self.transposed, self.dtype, self.shape]\n\n @classmethod\n def __tensor_unflatten__(cls, tensor_data_dict, tensor_attributes, outer_size=None, outer_stride=None):\n int_data, q_scales = tensor_data_dict[\"int_data\"], tensor_data_dict[\"q_scales\"]\n transposed, dtype, shape = tensor_attributes\n return cls(int_data, q_scales, transposed, shape if outer_size is None else outer_size, dtype=dtype, strides=outer_stride)\n\n @classmethod\n def from_float(cls, input_float, qmin=-128, qmax=127):\n \"\"\"\n Method used to convert a linear weight tensor to an instance of the\n Int8DynamicallyQuantizedLinearWeight subclass.\n\n Example usage::\n\n model.lin_mod.weight = (\n Int8DynamicallyQuantizedLinearWeight.from_float(model.lin_mod.weight)\n )\n \"\"\"\n w_int_repr, w_scales, _ = dynamically_quantize_per_channel(\n input_float, qmin, qmax, torch.int8\n )\n # the desired representation shape for fast quantized matmul is\n # transposed compared to how it's stored as a linear weight,\n # i.e. we want in_channels as dim=0 and out_channels (and quantized axis) as dim=1\n # however the external representation of our tensor will maintain the correct\n # shape attribute which needs to be tracked directly.\n int_data = w_int_repr.contiguous().t()\n if cls is not Int8DynamicallyQuantizedLinearWeight:\n int_data = int_data.contiguous()\n return cls(\n int_data, w_scales, False, input_float.shape, dtype=input_float.dtype\n )" }, { "identifier": "Int8WeightOnlyQuantizedLinearWeight", "path": "torchao/quantization/subclass.py", "snippet": "class Int8WeightOnlyQuantizedLinearWeight(Int8DynamicallyQuantizedLinearWeight):\n \"\"\"\n A Tensor subclass that when applied to a weight used in a linear op/module,\n changes the linear op to a weight-only quantized linear op with symmetric\n per-channel quantization on the weight.\n \"\"\"\n\n @staticmethod\n def _quantized_op(act_mat, w_qtensor, bias):\n orig_dtype = act_mat.dtype\n y = torch.mm(act_mat.reshape(-1, act_mat.shape[-1]), w_qtensor.int_data.to(act_mat.dtype)) * w_qtensor.q_scales\n y = y.reshape(*act_mat.shape[:-1], y.shape[-1])\n if bias is not None:\n y += bias\n return y.to(orig_dtype)" }, { "identifier": "Int4WeightOnlyQuantizedLinearWeight", "path": "torchao/quantization/subclass.py", "snippet": "class Int4WeightOnlyQuantizedLinearWeight(QuantizedLinearWeightBase):\n \"\"\"\n A Tensor subclass that when applied to a weight used in a linear op/module,\n changes that linear op to a weight-only int4 quantized linear op with groupwise\n affine quantization on the weight.\n \"\"\"\n\n @staticmethod\n def __new__(\n cls,\n int_data,\n scales_and_zeros,\n transposed,\n shape,\n groupsize=128,\n inner_k_tiles=8,\n **kwargs,\n ):\n kwargs[\"dtype\"] = kwargs.get(\"dtype\", scales_and_zeros.dtype)\n return super().__new__(cls, int_data, transposed, shape, **kwargs) # type: ignore[attr-defined]\n\n def __init__(\n self,\n int_data,\n scales_and_zeros,\n transposed,\n shape,\n groupsize,\n 
inner_k_tiles,\n **kwargs,\n ):\n # the transposed flag tracks whether the tensor subclass has been transposed relative\n # to how a weight is normally stored in a linear i.e. [out_features, in_features].\n # tracking both transposed and shape is slightly redundant but corner cases like\n # square matrices can cause issues otherwise\n self.scales_and_zeros = scales_and_zeros\n self.groupsize = groupsize\n self.inner_k_tiles = inner_k_tiles\n super().__init__(int_data, transposed)\n\n @staticmethod\n def _quantized_op(act_mat, w_qtensor, bias):\n orig_act_size = act_mat.size()\n orig_dtype = act_mat.dtype\n\n # reshape and pad activation\n act_mat = act_mat.reshape(-1, act_mat.shape[-1]).to(torch.bfloat16)\n pad_size = find_multiple(act_mat.shape[-1], 1024)\n act_mat = torch.nn.functional.pad(act_mat, (0, pad_size - act_mat.shape[-1]))\n\n # matmul\n y = aten._weight_int4pack_mm(\n act_mat.contiguous(), w_qtensor.int_data, w_qtensor.groupsize, w_qtensor.scales_and_zeros\n )\n\n # remove out_feature padding\n orig_out_features = w_qtensor.shape[-1] if w_qtensor.transposed else w_qtensor.shape[-2]\n y = y[:, :orig_out_features]\n\n y = y.reshape(*orig_act_size[:-1], orig_out_features)\n if bias is not None:\n y += bias\n return y.to(orig_dtype)\n\n def dequantize(self):\n eye_shape = self.shape[1] if not self.transposed else self.shape[0]\n w_dq = self._quantized_op(\n torch.eye(eye_shape, device=self.device, dtype=self.dtype), self, None\n )\n # we dequantized using linear with the identity matrix, output has shape [in_channels, out_channels]\n # so we need to transpose back to get the original shape unless self.transposed is set.\n w_dq = w_dq if self.transposed else w_dq.t()\n return w_dq.to(self.dtype)\n\n def int_repr(self):\n return self.int_data\n\n def q_params(self):\n scales, zero_points = unpack_tinygemm_scales_and_zeros(\n self.scales_and_zeros,\n )\n return {\"q_scales\": scales, \"q_zero_points\": zero_points}\n\n def to(self, *args, **kwargs):\n kwargs = self._get_to_kwargs(*args, **kwargs)\n return self.__class__(\n self.int_data.to(kwargs[\"device\"]),\n self.scales_and_zeros.to(kwargs[\"device\"]),\n self.transposed,\n self.shape,\n self.groupsize,\n self.inner_k_tiles,\n **kwargs,\n )\n\n def _apply_fn_to_data(self, fn):\n return self.__class__(\n fn(self.int_data),\n fn(self.scales_and_zeros),\n self.transposed,\n self.shape,\n self.groupsize,\n self.inner_k_tiles,\n dtype=self.dtype,\n )\n\n def _change_shape(self, shape):\n return self.__class__(\n self.int_data,\n self.scales_and_zeros,\n self.transposed,\n shape,\n self.groupsize,\n self.inner_k_tiles,\n dtype=self.dtype\n )\n\n def __tensor_flatten__(self):\n return [\"int_data\", \"scales_and_zeros\"], (\n self.transposed,\n self.groupsize,\n self.inner_k_tiles,\n self.dtype,\n self.shape\n )\n\n @classmethod\n def __tensor_unflatten__(cls, tensor_data_dict, attributes, outer_size=None, outer_stride=None):\n int_data, scales_and_zeros = (\n tensor_data_dict[\"int_data\"],\n tensor_data_dict[\"scales_and_zeros\"],\n )\n transposed, groupsize, inner_k_tiles, dtype, shape = attributes\n return cls(\n int_data,\n scales_and_zeros,\n transposed,\n shape if outer_size is None else outer_size,\n groupsize,\n inner_k_tiles,\n dtype=dtype,\n strides=outer_stride,\n )\n\n @classmethod\n def from_float(cls, input_float, groupsize=128, inner_k_tiles=8):\n \"\"\"\n Method used to convert a linear weight tensor to an instance of the\n Int4WeightOnlyQuantizedLinearWeight subclass.\n\n Example usage::\n\n model.lin_mod.weight = (\n 
Int4WeightOnlyQuantizedLinearWeight.from_float(model.lin_mod.weight)\n )\n \"\"\"\n assert groupsize in [256, 128, 64, 32]\n assert inner_k_tiles in [8, 4, 2]\n orig_shape = input_float.shape\n orig_out_features, orig_in_features = input_float.shape\n\n # padding\n in_features = find_multiple(orig_in_features, 1024)\n out_features = find_multiple(orig_out_features, 8)\n input_float = torch.nn.functional.pad(\n input_float, (0, in_features - orig_in_features, 0, out_features - orig_out_features)\n )\n\n # quantization and packing\n input_int4x8, scales_and_zeros = groupwise_affine_quantize_tensor(\n input_float, 4, groupsize\n )\n int_data = aten._convert_weight_to_int4pack(\n input_int4x8, inner_k_tiles\n )\n\n return cls(\n int_data,\n scales_and_zeros,\n False,\n orig_shape,\n groupsize,\n inner_k_tiles,\n dtype=input_float.dtype,\n )" }, { "identifier": "_apply_logging_hook", "path": "torchao/quantization/utils.py", "snippet": "def find_multiple(n: int, k: int) -> int:\ndef compute_error(x, y):\ndef _get_logging_hook(fqn):\n def forward_hook(module, input):\ndef _apply_logging_hook(model):\n def __torch_dispatch__(self, func, types, args=(), kwargs=None):\ndef get_model_size_in_bytes(model):\nclass LoggingTensorMode(TorchDispatchMode):" } ]
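The dynamically_quantize_per_channel and dequantize_per_channel snippets above describe symmetric per-channel int8 quantization with the channel on dim 0 and a zero point of 0. A plain-PyTorch round-trip sketch of that math is given below (illustrative only; the function names are hypothetical and this is not the torchao API):

import torch


def quantize_per_channel_symmetric(x: torch.Tensor, qmin: int = -128, qmax: int = 127):
    # Per-channel scale from the absolute maximum along dim 1, clamped to eps as in the snippet.
    eps = torch.finfo(torch.float32).eps
    min_val, max_val = torch.aminmax(x, dim=1)
    amax = torch.max(-torch.clamp(min_val, max=0.0), torch.clamp(max_val, min=0.0))
    scale = torch.clamp(amax / ((qmax - qmin) / 2), min=eps)
    q = torch.clamp(torch.round(x / scale.unsqueeze(1)), qmin, qmax).to(torch.int8)
    return q, scale


def dequantize_per_channel_symmetric(q: torch.Tensor, scale: torch.Tensor) -> torch.Tensor:
    # Symmetric scheme: zero point is 0, so dequantization is a per-channel rescale.
    return q.to(torch.float32) * scale.unsqueeze(1)


if __name__ == "__main__":
    w = torch.randn(16, 32)  # [out_channels, in_channels], channel axis is dim 0
    q, scale = quantize_per_channel_symmetric(w)
    w_dq = dequantize_per_channel_symmetric(q, scale)
    # Reconstruction error is bounded by roughly half a quantization step per channel.
    print("max abs error:", (w - w_dq).abs().max().item())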
import copy import unittest import torch import torch.nn as nn import os from torch._inductor.utils import run_and_get_code from torch._dynamo import config from torch.ao.quantization import MinMaxObserver, QConfigMapping from torchao.quantization.dynamic_quant import ( DynamicallyPerAxisQuantizedLinear, ) from torchao.quantization.quant_api import ( apply_dynamic_quant, apply_weight_only_int8_quant, change_linear_weights_to_int8_dqtensors, change_linear_weights_to_int8_woqtensors, change_linear_weights_to_int4_woqtensors, _replace_with_custom_fn_if_matches_filter, ) from torchao.quantization.quant_primitives import ( dequantize_per_channel, dequantize_per_tensor, dynamically_quantize_per_channel, dynamically_quantize_per_tensor, quant_int8_dynamic_linear, quant_int8_dynamic_per_token_linear, quantize_activation_per_token_absmax, safe_int_mm, ) from torchao.quantization.smoothquant import ( get_scale, smooth_fq_linear_to_inference, SmoothFakeDynamicallyQuantizedLinear, swap_linear_with_smooth_fq_linear, ) from torchao.quantization.subclass import ( Int8DynamicallyQuantizedLinearWeight, Int8WeightOnlyQuantizedLinearWeight, Int4WeightOnlyQuantizedLinearWeight ) from torchao.quantization.utils import ( _apply_logging_hook, compute_error, compute_error as SQNR, _fqn_to_op_to_shape_to_count, LoggingTensorMode, ) from torch.ao.quantization.quantize_fx import convert_to_reference_fx, prepare_fx from transformers import ( # type: ignore[import-untyped] DistilBertModel, DistilBertTokenizer, )
token_num: 11,283
x, qmin, qmax, int_dtype, w_vals, lin_ref_w_scale, w_vals_sums, bias, float_dtype, ) # print('y', y) # print('y_q_ref', y_q_ref) # print('y_ref', y_ref) sqnr_ref = compute_error(y_ref, y_q_ref) sqnr_our = compute_error(y_ref, y) # print('sqnr_ref', sqnr_ref, 'sqnr_our', sqnr_our) # for large shapes, sqnr can be in the high 30s for float32 and float16 self.assertTrue(sqnr_our.item() >= 37.5) def test_qlinear_per_channel_numerics_cpu(self): # Note: the AO codebase doesn't easily support qint8 activations, # so the test cases below are for the quant primitives defined in # this file only. The AO reference is using quint8 here. test_cases = ( ((2, 3), (3, 4), 0, 255, torch.uint8, torch.quint8, torch.float32, "cpu"), ((2, 3), (3, 4), -128, 127, torch.int8, torch.qint8, torch.float32, "cpu"), ) for test_case in test_cases: self._test_qlinear_per_channel_numerics(*test_case) def test_qlinear_per_channel_numerics_cuda(self): if not torch.cuda.is_available(): print("no cuda, skip") return test_cases = ( # Note: torch._int_mm needs int8 activations, so we don't test uint8 # activations on CUDA at all ( (32, 32), (32, 16), -128, 127, torch.int8, torch.qint8, torch.float32, "cuda", ), ( (32, 32), (32, 16), -128, 127, torch.int8, torch.qint8, torch.float16, "cuda", ), # a large shape from LLaMa 1.5B - currently fails for float16 ( (17, 4096), (4096, 1536), -128, 127, torch.int8, torch.qint8, torch.float32, "cuda", ), ( (17, 4096), (4096, 1536), -128, 127, torch.int8, torch.qint8, torch.float16, "cuda", ), ) for test_case in test_cases: self._test_qlinear_per_channel_numerics(*test_case) class TestSubclass(unittest.TestCase): def _test_dequantize_impl( self, test_subclass_from_float, min_sqnr=35, test_dtype=torch.bfloat16, test_shape=(32, 64, 64), ): m, k, n = test_shape lin = torch.nn.Linear(k, n, device="cuda").to(test_dtype) w = lin.weight.detach() lin.weight = torch.nn.Parameter( test_subclass_from_float(lin.weight), requires_grad=False ) self.assertGreater( SQNR(w, lin.weight.dequantize()), min_sqnr, f"{lin.weight.__class__.__name__} failed dtype={test_dtype}" ) self.assertGreater( SQNR(w.t(), lin.weight.t().dequantize()), min_sqnr, f"{lin.weight.__class__.__name__} failed transpose on dtype={test_dtype}" ) def test_dequantize_int8_dynamic_quant_subclass(self): for test_dtype in [torch.float32, torch.float16, torch.bfloat16]: self._test_dequantize_impl( Int8DynamicallyQuantizedLinearWeight.from_float, 35, test_dtype ) def test_dequantize_int8_weight_only_quant_subclass(self): for test_dtype in [torch.float32, torch.float16, torch.bfloat16]: self._test_dequantize_impl(
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # mypy: ignore-errors torch.manual_seed(0) config.cache_size_limit = 100 class SmoothquantUnitTest(unittest.TestCase): # first, let's reproduce the graphic from the paper, Figure 4, to ensure # we are calculating the scales correctly def test_figure_4(self): X = torch.FloatTensor([1, -16, 2, 6, -2, 8, -1, -9]).reshape(1, 2, 4) W = torch.FloatTensor([2, 1, -2, 1, -1, -1, 2, -1, -2, -1, -1, 1]).reshape(4, 3) X_mul_W = torch.matmul(X, W) smoothquant_scale = get_scale( torch.amax(torch.abs(X), dim=(0, 1)), torch.amax(torch.abs(W), dim=1), alpha=0.5, ) # reproduce scaled calculation X_scaled = X / smoothquant_scale.reshape(1, 1, -1) W_scaled = torch.matmul(torch.diag(smoothquant_scale), W) X_scaled_mul_scaled_W = torch.matmul(X_scaled, W_scaled) assert torch.allclose(X_mul_W, X_scaled_mul_scaled_W), "not close!" assert X_mul_W.shape == X_scaled_mul_scaled_W.shape # next, run the above test on a sample of representative inputs def test_tensors(self): x_shape = (1, 5, 7) w_shape = (7, 9) for i in range(3): X = torch.randn(x_shape) * 10 W = torch.randn(w_shape) s = get_scale( torch.amax(torch.abs(X), dim=(0, 1)), torch.amax(torch.abs(W), dim=1), alpha=0.5, ) Y = torch.matmul(X, W) Y_ref = torch.matmul( X / s.reshape(1, 1, -1), torch.matmul(torch.diag(s), W), ) assert torch.allclose(Y, Y_ref, atol=1e-3, rtol=1e-3), "not close!" def _test_smooth_linear_impl(self, x_shape, lin_shape, device): # so we can use the full range torch.backends.quantized.engine = "qnnpack" x = torch.randn(*x_shape, device=device) * 9 + 10 lin_fp32 = nn.Linear(*lin_shape, device=device) # misc: ignore lin_smooth = SmoothFakeDynamicallyQuantizedLinear.from_float( copy.deepcopy(lin_fp32), alpha=0.25 ) lin_smooth_skip_scaling = SmoothFakeDynamicallyQuantizedLinear.from_float( copy.deepcopy(lin_fp32), alpha=0.25 ) lin_fp32_copy = copy.deepcopy(lin_fp32) # assignment: ignore lin_fp32_copy.qconfig = torch.ao.quantization.QConfig( # assignment: ignore activation=None, weight=torch.ao.quantization.default_per_channel_weight_observer, ) lin_dynamic_q = torch.ao.nn.quantized.dynamic.Linear.from_float( lin_fp32_copy.cpu() ) y_ref = lin_fp32(x) # calibrate the smoothquant versions y_smooth_nocalib = lin_smooth(x) _ = lin_smooth_skip_scaling(x) lin_smooth.to_inference() lin_smooth_skip_scaling.debug_skip_scaling = True lin_smooth_skip_scaling.to_inference() # verify that with scaling turned off, numerics match quantized version y_smooth_fq_only = lin_smooth_skip_scaling(x) y_smooth_fq = lin_smooth(x) y_dynamic_q = lin_dynamic_q(x.cpu()).to(device) # print('y_ref', y_ref) # print('y_smooth_nocalib', y_smooth_nocalib) # print('y_smooth_fq', y_smooth_fq) # print('y_smooth_fq_only', y_smooth_fq_only) # print('y_dynamic_q', y_dynamic_q) sqnr_smooth_fq = compute_error(y_ref, y_smooth_fq) sqnr_dynamic_q = compute_error(y_ref, y_dynamic_q) sqnr_fq = compute_error(y_smooth_fq_only, y_dynamic_q) # print('sqnr_smooth', sqnr_smooth_fq, 'sqnr_dynamic', sqnr_dynamic_q, 'sqnr_fq', sqnr_fq) assert torch.allclose( y_ref, y_smooth_nocalib ), "y_ref not close to y_smooth_nocalib" # after https://github.com/pytorch-labs/ao_benchmarks/pull/32, # numerics do not match exactly between production c++ code # and this Python code # assert torch.allclose( # y_smooth_fq_only, y_dynamic_q, # atol=torch.max(y_smooth_fq_only).item()*0.01, # rtol=0.00001), \ # 
'y_smooth_fq_only not close to y_dynamic_q' self.assertTrue(sqnr_smooth_fq.item() >= 40.0) self.assertTrue(sqnr_dynamic_q.item() >= 40.0) self.assertTrue(sqnr_fq.item() >= 40.0) def test_smooth_linear_cpu(self): self._test_smooth_linear_impl((1, 5, 3), (3, 4), "cpu") def test_smooth_linear_cuda(self): if not torch.cuda.is_available(): print("no cuda, skip") return self._test_smooth_linear_impl((1, 32, 32), (32, 16), "cuda") def test_smooth_linear_edge_cases(self): # so we can use the full range torch.backends.quantized.engine = "qnnpack" lin_fp32 = nn.Linear(3, 4) lin_smooth = SmoothFakeDynamicallyQuantizedLinear.from_float( lin_fp32, alpha=0.25 ) # test different ranks x0 = torch.randn(4, 5, 3) x1 = torch.randn(1, 8, 5, 3) x2 = torch.randn(2, 3, 7, 5, 3) # calibrate _ = lin_smooth(x0) _ = lin_smooth(x1) _ = lin_smooth(x2) # inference lin_smooth.to_inference() _ = lin_smooth(x0) _ = lin_smooth(x1) _ = lin_smooth(x2) def test_swap(self): m = nn.Sequential( nn.Sequential(nn.Linear(4, 4), nn.ReLU(), nn.Linear(4, 4)), nn.Linear(4, 4), ) m_copy = copy.deepcopy(m) swap_linear_with_smooth_fq_linear(m_copy, skip_fqn_list=["0.2"]) # verify all linears are swapped assert isinstance(m_copy[0][0], SmoothFakeDynamicallyQuantizedLinear) assert isinstance(m_copy[0][1], nn.ReLU) # this one was skipped assert isinstance(m_copy[0][2], nn.Linear) assert isinstance(m_copy[1], SmoothFakeDynamicallyQuantizedLinear) # verify results do not change without smoothing x = torch.randn(4, 4) y_ref = m(x) y = m_copy(x) assert torch.allclose(y_ref, y) def test_weight_t_and_non_t_numerics_match(self): # verify that numerics match whether weight is stored # in transposed format (for cuBLAS) vs non-transposed format # (for torch.compile) if not torch.cuda.is_available(): print("no cuda, skip") return dtype = torch.half device = "cuda" lin_ref = nn.Linear(32, 16, dtype=dtype, device=device) lin_eager_t = copy.deepcopy(lin_ref) lin_opt_t = copy.deepcopy(lin_eager_t) lin_opt = copy.deepcopy(lin_eager_t) lin_eager_t = SmoothFakeDynamicallyQuantizedLinear.from_float(lin_eager_t) lin_opt_t = SmoothFakeDynamicallyQuantizedLinear.from_float(lin_opt_t) lin_opt = SmoothFakeDynamicallyQuantizedLinear.from_float(lin_opt) lin_opt.store_w_int_repr_t = False x = torch.randn(32, 32, dtype=dtype, device=device) y_calib_eager_t = lin_eager_t(x) y_calib_opt_t = lin_opt_t(x) y_calib_opt = lin_opt(x) torch.testing.assert_close(y_calib_eager_t, y_calib_opt_t) torch.testing.assert_close(y_calib_eager_t, y_calib_opt) lin_eager_t.to_inference() lin_opt_t.to_inference() lin_opt.to_inference() torch.testing.assert_close(lin_eager_t.W_int_repr, lin_opt_t.W_int_repr) torch.testing.assert_close(lin_eager_t.W_int_repr, lin_opt.W_int_repr) lin_opt_t = torch.compile(lin_opt_t, mode="max-autotune") lin_opt = torch.compile(lin_opt, mode="max-autotune") y_ref = lin_ref(x) y_eager = lin_eager_t(x) y_opt_t = lin_opt_t(x) y_opt = lin_opt(x) if not torch.any(torch.isinf(y_ref)) and torch.any(torch.isinf(y_eager)): # eager mode torch._int_mm is sometimes buggy, when this happens # we can't really compare the compiled version against it properly print("eager mode torch._int_mm known bad, test is inconclusive") return sqnr_ref_eager = compute_error(y_ref, y_eager) sqnr_eager_opt_t = compute_error(y_eager, y_opt_t) sqnr_eager_opt = compute_error(y_eager, y_opt) # since torch.compile for a torch.half model can # change numerics significantly, we can only test for a high SQNR here # and not for closeness self.assertTrue(sqnr_eager_opt_t >= 45.0) 
self.assertTrue(sqnr_eager_opt >= 45.0) # y_opt_t and y_opt should be equivalent torch.testing.assert_close(y_opt_t, y_opt) def test_selective_torch_compile(self): m = nn.Sequential( nn.Linear(4, 4), nn.Sequential( nn.Linear(4, 4), nn.Linear(4, 4), ), nn.Linear(4, 4), ) x = torch.randn(4, 4) y_ref = m(x) _replace_with_custom_fn_if_matches_filter( m, lambda mod: torch.compile(mod), lambda mod, fqn: isinstance(mod, nn.Linear) and fqn != "1.0", ) self.assertTrue(isinstance(m[0], torch._dynamo.eval_frame.OptimizedModule)) self.assertTrue(isinstance(m[1][0], nn.Linear)) self.assertTrue(isinstance(m[1][1], torch._dynamo.eval_frame.OptimizedModule)) self.assertTrue(isinstance(m[2], torch._dynamo.eval_frame.OptimizedModule)) y = m(x) torch.testing.assert_close(y, y_ref) def test_debug_x_absmax(self): m = nn.Sequential(nn.Linear(3, 4)) x0 = torch.randn(4, 5, 3) y0 = m(x0) swap_linear_with_smooth_fq_linear(m) # no calibration, straight to inference, should not crash smooth_fq_linear_to_inference(m, debug_skip_calibration=True) y1 = m(x0) class PythonQuantPrimitivesUnitTest(unittest.TestCase): def _test_dynamic_quant_per_tensor_numerics_impl( self, qmin, qmax, int_dtype, qint_dtype, float_dtype, device, qscheme ): x = torch.randn(256, dtype=float_dtype, device=device) y_vals, y_scale, y_zero_point = dynamically_quantize_per_tensor( x, qmin, qmax, int_dtype, qscheme ) # reference # quantize_per_tensor_dynamic doesn't work for half, so we cast there and back x_for_ref = x.half().float() if float_dtype == torch.float16 else x # quantize_per_tensor_dynamic doesn't support qscheme, so we just do dynamic # quant manually with observers + static quant obs = MinMaxObserver( dtype=qint_dtype, qscheme=qscheme, quant_min=qmin, quant_max=qmax ).to(device) obs(x_for_ref) ref_scale, ref_zero_point = obs.calculate_qparams() y_ref = torch.quantize_per_tensor( x_for_ref, ref_scale, ref_zero_point, qint_dtype ) # y_ref = torch.quantize_per_tensor_dynamic(x_for_ref, qint_dtype, False) # print(y_ref) if float_dtype == torch.float: assert torch.equal(y_vals, y_ref.int_repr()) else: # numerics are not exactly aligned yet, off-by-one probably due # to rounding assert torch.max(torch.abs(y_vals - y_ref.int_repr())).item() <= 1 torch.testing.assert_close( y_scale, torch.tensor([y_ref.q_scale()], device=device, dtype=float_dtype) ) if y_zero_point is not None: assert torch.equal( y_zero_point, torch.tensor([y_ref.q_zero_point()], device=device) ) else: self.assertTrue(y_ref.q_zero_point() == 0) # dequantize and check again x_dq = dequantize_per_tensor(y_vals, y_scale, y_zero_point, float_dtype) y_ref_dq = y_ref.dequantize().to(float_dtype) if float_dtype == torch.float: torch.testing.assert_close(x_dq, y_ref_dq) else: sqnr = compute_error(x_dq, y_ref_dq) self.assertTrue(sqnr.item() > 45.0) def test_dynamic_quant_per_tensor_numerics_cpu(self): # verifies that dynamic quant per tensor in plain pytorch matches # numerics of production AO code # TODO(future): test this on cpu-half, need to first make # torch.aminmax support half on cpu test_cases = ( ( 0, 255, torch.uint8, torch.quint8, torch.float32, "cpu", torch.per_tensor_affine, ), ( -128, 127, torch.int8, torch.qint8, torch.float32, "cpu", torch.per_tensor_affine, ), ( -128, 127, torch.int8, torch.qint8, torch.float32, "cpu", torch.per_tensor_symmetric, ), ( -127, 127, torch.int8, torch.qint8, torch.float32, "cpu", torch.per_tensor_symmetric, ), ) for row in test_cases: self._test_dynamic_quant_per_tensor_numerics_impl(*row) def 
test_dynamic_quant_per_tensor_numerics_cuda(self): # verifies that dynamic quant per tensor in plain pytorch matches # numerics of production AO code if not torch.cuda.is_available(): print("no cuda, skip") return test_cases = ( ( -128, 127, torch.int8, torch.qint8, torch.float32, "cuda", torch.per_tensor_affine, ), ( -128, 127, torch.int8, torch.qint8, torch.float16, "cuda", torch.per_tensor_affine, ), ( -128, 127, torch.int8, torch.qint8, torch.float32, "cuda", torch.per_tensor_symmetric, ), ( -128, 127, torch.int8, torch.qint8, torch.float16, "cuda", torch.per_tensor_symmetric, ), ( -127, 127, torch.int8, torch.qint8, torch.float32, "cuda", torch.per_tensor_symmetric, ), ( -127, 127, torch.int8, torch.qint8, torch.float16, "cuda", torch.per_tensor_symmetric, ), ) for row in test_cases: self._test_dynamic_quant_per_tensor_numerics_impl(*row) def _test_dynamic_quant_per_channel_numerics_impl( self, qmin, qmax, int_dtype, qint_dtype, float_dtype, device ): # verifies that dynamic quant per channel in plain pytorch matches # numerics of production AO code # TODO(future): test this on cpu-half, need to first make # torch.aminmax support half on cpu x = torch.randn(16, 32, device=device, dtype=float_dtype) y_vals, y_scale, y_zero_point = dynamically_quantize_per_channel( x, qmin, qmax, int_dtype ) min_val, max_val = torch.aminmax(x, dim=1) # reference weight_obs = torch.ao.quantization.MovingAveragePerChannelMinMaxObserver( dtype=qint_dtype, quant_min=qmin, quant_max=qmax, qscheme=torch.per_channel_symmetric, averaging_constant=1.0, # make it ignore previous iterations ) weight_obs(x) y_ref_scale, y_ref_zp = weight_obs.calculate_qparams() y_ref_scale = y_ref_scale.to(device) y_ref_zp = y_ref_zp.to(device) # quantize_per_channel doesn't work for half, so we cast there and back x_for_ref = x.half().float() if float_dtype == torch.float16 else x y_ref = torch.quantize_per_channel( x_for_ref, y_ref_scale, y_ref_zp, 0, qint_dtype ) torch.testing.assert_close( y_scale, y_ref.q_per_channel_scales().to(float_dtype) ) assert torch.equal(y_zero_point, y_ref.q_per_channel_zero_points()) # this test case has one element where the rounding is off by one # from Python-only code vs the c++ code, it's easy to repro with # various shapes. 
# Discussion here is relevant: https://github.com/pytorch/pytorch/issues/16498 # TODO(future): figure out what to do about this # assert torch.equal(int_vals, q_reference.int_repr()) assert torch.max(torch.abs(y_vals - y_ref.int_repr())) <= 1 # dequantize x_dq = dequantize_per_channel(y_vals, y_scale, y_zero_point) x_ref_dq = y_ref.dequantize() # off-by-one for scale is okay torch.testing.assert_close( x_dq, x_ref_dq, atol=torch.max(y_scale).item() * 1.01, rtol=0.0001 ) def test_dynamic_quant_per_channel_numerics_cpu(self): test_cases = ((-128, 127, torch.int8, torch.qint8, torch.float32, "cpu"),) for row in test_cases: self._test_dynamic_quant_per_channel_numerics_impl(*row) def test_dynamic_quant_per_channel_numerics_cuda(self): if not torch.cuda.is_available(): print("no cuda, skip") return test_cases = ( (-128, 127, torch.int8, torch.qint8, torch.float32, "cuda"), (-128, 127, torch.int8, torch.qint8, torch.float16, "cuda"), ) for row in test_cases: self._test_dynamic_quant_per_channel_numerics_impl(*row) def _test_quantize_per_token_impl(self, device, dtype): x = torch.randn(3, 3, 3, device=device, dtype=dtype) xq, scales = quantize_activation_per_token_absmax(x) x_dq = dequantize_per_tensor(xq, scales, None).to(x.dtype) sqnr = compute_error(x, x_dq) self.assertTrue(sqnr >= 45.0) def test_quantize_per_token_cpu(self): for dtype in (torch.float32, torch.float16, torch.bfloat16): self._test_quantize_per_token_impl("cpu", dtype) def test_quantize_per_token_cuda(self): if not torch.cuda.is_available(): print("no cuda, skip") return for dtype in (torch.float32, torch.float16, torch.bfloat16): self._test_quantize_per_token_impl("cuda", dtype) def _test_per_token_linear_impl(self, device, dtype): x = torch.randn(2, 16, 8, device=device, dtype=dtype) w = torch.randn(16, 8, device=device, dtype=dtype) wq, w_scales, _w_zp = dynamically_quantize_per_channel(w, -127, 127, torch.int8) # Note: need to make the weight contiguous because we are # testing in eager mode and cuBlas will not give correct results # for a transposed weight y = quant_int8_dynamic_per_token_linear( x, wq.t().contiguous(), w_scales, None, dtype ) y_ref = torch.matmul(x, w.t()) sqnr = compute_error(y_ref, y) self.assertTrue(sqnr >= 42.0) def test_per_token_linear_cpu(self): for dtype in (torch.float32,): self._test_per_token_linear_impl("cpu", dtype) def test_per_token_linear_cuda(self): if not torch.cuda.is_available(): print("no cuda, skip") return for dtype in (torch.float32, torch.float16, torch.bfloat16): self._test_per_token_linear_impl("cuda", dtype) def test__int_mm(self): # TODO(future): figure out what here needs to move to PT core, # if it's not already tested there if not torch.cuda.is_available(): print("no cuda, skip") return m, k, n = 32, 32, 16 x = torch.randint(-128, 127, (m, k), dtype=torch.int8, device="cuda") w = torch.randint(-128, 127, (k, n), dtype=torch.int8, device="cuda") y_ref = torch.matmul(x.float(), w.float()).to(torch.int32) y_raw = safe_int_mm(x, w) wrap_in_mm_opt = torch.compile(safe_int_mm, mode="max-autotune") # note: triton chokes on the line below on k == 8 and n == 8 with # https://www.internalfb.com/phabricator/paste/view/P683467944 # TODO(future): file an issue y_opt = wrap_in_mm_opt(x, w) torch.testing.assert_close(y_ref, y_raw, atol=0, rtol=0) torch.testing.assert_close(y_ref, y_opt, atol=0, rtol=0) def test__int_mm_eager_and_torch_compile_numerics(self): if not torch.cuda.is_available(): print("no cuda, skip") return def __int_mm_ref(x, w): x = x.cpu().to(torch.int32) w = 
w.cpu().to(torch.int32) y = torch.matmul(x, w) return y.cuda() shapes = ( # minimal test shape ((1, 32, 32), (32, 16)), # paste of real linear shapes from LLaMa 1.5b ((17, 1, 1536), (1536, 1536)), ((17, 8, 4096), (4096, 1536)), ((17, 1, 1536), (1536, 4096)), ((17, 8, 1536), (1536, 1536)), ((17, 1, 4096), (4096, 1536)), ((17, 8, 1536), (1536, 4096)), ) for x_shape, w_shape in shapes: def wrap_torch_int_mm(x, w): b, n, k = x.shape k, m = w.shape x = x.reshape(b * n, k) res = safe_int_mm(x, w) res = res.reshape(b, n, m) return res wrap_torch_int_mm_opt = torch.compile( wrap_torch_int_mm, mode="max-autotune" ) x = torch.randint(-128, 127, x_shape, dtype=torch.int8, device="cuda") w = torch.randint(-128, 127, w_shape, dtype=torch.int8, device="cuda") z_ref = __int_mm_ref(x, w) z_eager = wrap_torch_int_mm(x, w) z_torch_compile = wrap_torch_int_mm_opt(x, w) # print(z_ref) # print(z_eager) # print(z_torch_compile) torch.testing.assert_close(z_ref, z_eager, atol=0, rtol=0) torch.testing.assert_close(z_ref, z_torch_compile, atol=0, rtol=0) def _test_qlinear_per_channel_numerics( self, x_shape, lin_shape, qmin, qmax, int_dtype, qint_dtype, float_dtype, device ): qconfig = torch.ao.quantization.per_channel_dynamic_qconfig x = torch.randn(*x_shape, device=device, dtype=float_dtype) # TODO: test bias true and false # Note: reference path only works on float because lack of aten quant primitives # support of half, so we cast back and forth to emulate lin_ref = ( nn.Sequential(nn.Linear(*lin_shape)) .eval() .to(float_dtype) .float() .to(device) ) y_ref = lin_ref(x.float()) weight = lin_ref[0].weight bias = lin_ref[0].bias qconfig_mapping = QConfigMapping().set_global(qconfig) lin_ref_p = prepare_fx(lin_ref, qconfig_mapping, (torch.randn(1, 1),)) lin_ref_q = convert_to_reference_fx(lin_ref_p) y_q_ref = lin_ref_q(x.float()) # scale, zp of weight (get from reference model) w_obs = qconfig.weight() w_obs(weight) lin_ref_w_scale, lin_ref_w_zp = w_obs.calculate_qparams() lin_ref_w_scale = lin_ref_w_scale.to(device).to(float_dtype) # print('lin_ref_w', 'scale', lin_ref_w_scale, 'zp', lin_ref_w_zp) w_vals, _s, _z = dynamically_quantize_per_channel( getattr(lin_ref_q, "0").weight.to(float_dtype), -128, 127, torch.int8 ) w_vals = w_vals.t().contiguous() w_vals_sums = w_vals.sum(dim=0) # do our version of the quantized linear operator y = quant_int8_dynamic_linear( x, qmin, qmax, int_dtype, w_vals, lin_ref_w_scale, w_vals_sums, bias, float_dtype, ) # print('y', y) # print('y_q_ref', y_q_ref) # print('y_ref', y_ref) sqnr_ref = compute_error(y_ref, y_q_ref) sqnr_our = compute_error(y_ref, y) # print('sqnr_ref', sqnr_ref, 'sqnr_our', sqnr_our) # for large shapes, sqnr can be in the high 30s for float32 and float16 self.assertTrue(sqnr_our.item() >= 37.5) def test_qlinear_per_channel_numerics_cpu(self): # Note: the AO codebase doesn't easily support qint8 activations, # so the test cases below are for the quant primitives defined in # this file only. The AO reference is using quint8 here. 
test_cases = ( ((2, 3), (3, 4), 0, 255, torch.uint8, torch.quint8, torch.float32, "cpu"), ((2, 3), (3, 4), -128, 127, torch.int8, torch.qint8, torch.float32, "cpu"), ) for test_case in test_cases: self._test_qlinear_per_channel_numerics(*test_case) def test_qlinear_per_channel_numerics_cuda(self): if not torch.cuda.is_available(): print("no cuda, skip") return test_cases = ( # Note: torch._int_mm needs int8 activations, so we don't test uint8 # activations on CUDA at all ( (32, 32), (32, 16), -128, 127, torch.int8, torch.qint8, torch.float32, "cuda", ), ( (32, 32), (32, 16), -128, 127, torch.int8, torch.qint8, torch.float16, "cuda", ), # a large shape from LLaMa 1.5B - currently fails for float16 ( (17, 4096), (4096, 1536), -128, 127, torch.int8, torch.qint8, torch.float32, "cuda", ), ( (17, 4096), (4096, 1536), -128, 127, torch.int8, torch.qint8, torch.float16, "cuda", ), ) for test_case in test_cases: self._test_qlinear_per_channel_numerics(*test_case) class TestSubclass(unittest.TestCase): def _test_dequantize_impl( self, test_subclass_from_float, min_sqnr=35, test_dtype=torch.bfloat16, test_shape=(32, 64, 64), ): m, k, n = test_shape lin = torch.nn.Linear(k, n, device="cuda").to(test_dtype) w = lin.weight.detach() lin.weight = torch.nn.Parameter( test_subclass_from_float(lin.weight), requires_grad=False ) self.assertGreater( SQNR(w, lin.weight.dequantize()), min_sqnr, f"{lin.weight.__class__.__name__} failed dtype={test_dtype}" ) self.assertGreater( SQNR(w.t(), lin.weight.t().dequantize()), min_sqnr, f"{lin.weight.__class__.__name__} failed transpose on dtype={test_dtype}" ) def test_dequantize_int8_dynamic_quant_subclass(self): for test_dtype in [torch.float32, torch.float16, torch.bfloat16]: self._test_dequantize_impl( Int8DynamicallyQuantizedLinearWeight.from_float, 35, test_dtype ) def test_dequantize_int8_weight_only_quant_subclass(self): for test_dtype in [torch.float32, torch.float16, torch.bfloat16]: self._test_dequantize_impl(
Int8WeightOnlyQuantizedLinearWeight.from_float, 35, test_dtype
20
2023-11-03 21:27:36+00:00
16k
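(Aside, not part of the dataset record above: a minimal self-contained sketch of the SmoothQuant invariance that the record's test_figure_4 exercises. The scale formula s_j = max|X_j|**alpha / max|W_j|**(1 - alpha) is an assumption taken from the SmoothQuant paper, and the helper name get_scale_sketch is hypothetical; the record itself imports the library's own get_scale, which may differ in detail.)

import torch

def get_scale_sketch(x_absmax, w_absmax, alpha=0.5):
    # Per-input-channel smoothing factor: channels with large activation
    # outliers are scaled down, and the matching weight rows are scaled up
    # to compensate. (Assumed formula, see note above.)
    return x_absmax.pow(alpha) / w_absmax.pow(1.0 - alpha)

# Same toy tensors as the record's test_figure_4.
X = torch.FloatTensor([1, -16, 2, 6, -2, 8, -1, -9]).reshape(1, 2, 4)
W = torch.FloatTensor([2, 1, -2, 1, -1, -1, 2, -1, -2, -1, -1, 1]).reshape(4, 3)
s = get_scale_sketch(torch.amax(torch.abs(X), dim=(0, 1)), torch.amax(torch.abs(W), dim=1))

# Dividing activations and multiplying weights by the same per-channel factor
# leaves the matmul unchanged (up to float rounding), which is what makes the
# smoothing safe to fold into the weights offline.
X_scaled_mul_W_scaled = torch.matmul(X / s.reshape(1, 1, -1), torch.matmul(torch.diag(s), W))
assert torch.allclose(torch.matmul(X, W), X_scaled_mul_W_scaled, atol=1e-5)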
codefuse-ai/Collinear-Constrained-Attention
train/trainer/atorch_trainer.py
[ { "identifier": "print_rank_0", "path": "utils/common_utils.py", "snippet": "TASK2ID = {}\nID2TASK = {}\n L = args.num_hidden_layers\n V = args.vocab_size\ndef get_rank():\ndef get_local_rank():\ndef is_main_process():\ndef is_local_main_process():\ndef print_rank_0(*message):\ndef get_world_size():\ndef wait_for_everyone():\ndef atorch_init_distributed(backend=\"nccl\"):\ndef atorch_reset_distributed():\ndef _goes_first(is_main):\ndef get_model_params_num(model):\ndef main_process_first():\ndef unwrap_model(model):\ndef honor_type(obj, generator):\ndef recursively_apply(\n func,\n data,\n *args,\n test_type=lambda t: isinstance(t, torch.Tensor),\n error_on_other_type=False,\n **kwargs,\n):\ndef gather(tensor):\n def _gpu_gather_one(tensor):\ndef save_ckpt(model, optimizer, lr_scheduler, epoch, steps, save_path, logger):\ndef scheduler_and_resume(args, train_dataloader, model, optimizer, checkpoint):\ndef get_computation_speed(batch_size_per_device, seq_len, step_time):\ndef human_readable_flops(num):\ndef get_tflops_new(args, batch_size, seq_len, step_time):\ndef get_tflops_megatron(total_model_param, hidden_size, num_hidden_layers, \n batch_size_per_device, seq_len, step_time):\ndef is_old_version(path):\ndef generate_task_id(data_paths, train_mode):\n def __init__(self, patience=7, verbose=False, delta=0):\n def __call__(self, val_loss, model):\n def save_checkpoint(self, val_loss, model):\nclass EarlyStopping:" }, { "identifier": "FAMO", "path": "utils/auto_accelerate_utils.py", "snippet": "class FAMO:\n \"\"\"\n Fast Adaptive Multitask Optimization.\n \"\"\"\n def __init__(\n self,\n n_tasks: int,\n device: torch.device,\n mode: str = 'famo_valid',\n gamma: float = 0.001, # the regularization coefficient, default: 0.001\n w_lr: float = 0.025, # the learning rate of the task logits, default: 0.025\n max_norm: float = 1.0, # the maximum gradient norm\n ):\n self.min_losses = torch.zeros(n_tasks).to(device)\n self.w = torch.tensor([0.0] * n_tasks, device=device, requires_grad=True)\n self.w_opt = torch.optim.Adam([self.w], lr=w_lr, weight_decay=gamma)\n self.max_norm = max_norm\n self.n_tasks = n_tasks\n self.device = device\n self.first_train_step = True\n self.first_valid_step = True\n self.print_loss = None\n self.mode = mode\n self.prev_train_loss = None\n self.prev_valid_loss = None\n self.ratio_valid_task_loss_prev = torch.zeros(len(ID2TASK)).to(device)\n self.global_steps = 0\n self.z = None\n \n def set_min_losses(self, losses):\n self.min_losses = losses\n\n def get_weighted_loss(self, losses):\n self.prev_train_loss = losses\n self.z = F.softmax(self.w * 1, -1)\n # if is_main_process() and (self.global_steps % 10 == 0):\n # logger.info(f\"complete_steps: {self.global_steps}, per_task_weight: {self.z}\")\n if -1e20 in self.ratio_valid_task_loss_prev and self.mode == 'famo_valid_ema':\n self.z = F.softmax(torch.where(self.ratio_valid_task_loss_prev == -1e20, -1e20, self.z), -1)\n if self.global_steps % 10 == 0:\n print_rank_0(f'ratio_valid_task_loss_prev is {self.ratio_valid_task_loss_prev}, after, z is {self.z}')\n D = losses - self.min_losses + 1e-8\n if self.mode.startswith('famo_train'):\n c = (self.z / D).sum().detach()\n loss = (D.log() * self.z / c).sum()\n else:\n loss = (D * self.z).sum()\n return loss\n\n def update(self, curr_loss):\n if self.mode.startswith('famo_valid') and self.first_valid_step:\n self.first_valid_step = False\n self.prev_valid_loss = curr_loss\n return\n if self.mode.startswith('famo_train'):\n prev_loss = self.prev_train_loss\n else:\n prev_loss 
= self.prev_valid_loss\n self.prev_valid_loss = curr_loss\n delta = (prev_loss - self.min_losses + 1e-8).log() - \\\n (curr_loss - self.min_losses + 1e-8).log()\n with torch.enable_grad():\n d = torch.autograd.grad(F.softmax(self.w, -1),\n self.w,\n grad_outputs=delta.detach())[0]\n self.w_opt.zero_grad()\n self.w.grad = d\n self.w_opt.step()\n\n def backward(\n self,\n losses: torch.Tensor,\n shared_parameters: Union[\n List[torch.nn.parameter.Parameter], torch.Tensor\n ] = None,\n ):\n \"\"\"\n Parameters\n ----------\n losses :\n shared_parameters :\n task_specific_parameters :\n last_shared_parameters : parameters of last shared layer/block\n Returns\n -------\n Loss, extra outputs\n \"\"\"\n loss = self.get_weighted_loss(losses=losses)\n # if self.max_norm > 0 and shared_parameters is not None:\n # torch.nn.utils.clip_grad_norm_(shared_parameters, self.max_norm)\n # loss.backward()\n return loss" }, { "identifier": "get_ltor_masks_and_position_ids", "path": "utils/auto_accelerate_utils.py", "snippet": "def get_ltor_masks_and_position_ids(data):\n \"\"\"Build masks and position id for left to right model.\"\"\"\n\n # Extract batch size and sequence length.\n batch_size, seq_length = data.size()\n\n # Attention mask (lower triangular).\n # attention_mask = get_attn_mask(\n # seq_length=seq_length,\n # device=data.device,\n # )\n attention_mask = torch.ones((batch_size, seq_length), device=data.device)\n\n # Position ids.\n position_ids = torch.arange(seq_length, dtype=torch.long, device=data.device)\n position_ids = position_ids.unsqueeze(0).expand_as(data).clone()\n\n return attention_mask, position_ids" }, { "identifier": "SelfPacedStatus", "path": "utils/auto_accelerate_utils.py", "snippet": "class SelfPacedStatus:\n def __init__(self, interval=20):\n super(SelfPacedStatus, self).__init__()\n self.complete_steps = None\n self.current_epoch = None\n self.mode = None\n self.task_loss_prev = None\n self.w = None\n self.interval = interval\n \n def update(self, complete_steps, current_epoch, mode, task_loss_prev):\n self.complete_steps = complete_steps\n self.current_epoch = current_epoch\n self.mode = mode\n self.task_loss_prev = task_loss_prev" }, { "identifier": "GPTNeoXLayer", "path": "model/gpt_neox/modeling_gpt_neox.py", "snippet": "class GPTNeoXLayer(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.use_parallel_residual = config.use_parallel_residual\n self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)\n self.attention = GPTNeoXAttention(config)\n self.mlp = GPTNeoXMLP(config)\n\n def forward(\n self,\n hidden_states: Optional[torch.FloatTensor],\n attention_mask: Optional[torch.FloatTensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n use_cache: Optional[bool] = False,\n layer_past: Optional[Tuple[torch.Tensor]] = None,\n output_attentions: Optional[bool] = False,\n ):\n\n attention_layer_outputs = self.attention(\n self.input_layernorm(hidden_states),\n attention_mask=attention_mask,\n position_ids=position_ids,\n layer_past=layer_past,\n head_mask=head_mask,\n use_cache=use_cache,\n output_attentions=output_attentions,\n )\n attn_output = attention_layer_outputs[0] # output_attn: attn_output, present, (attn_weights)\n outputs = attention_layer_outputs[1:]\n\n if self.use_parallel_residual:\n # pseudocode:\n # x = x + attn(ln1(x)) + mlp(ln2(x))\n mlp_output = 
self.mlp(self.post_attention_layernorm(hidden_states))\n hidden_states = mlp_output + attn_output + hidden_states\n else:\n # pseudocode:\n # x = x + attn(ln1(x))\n # x = x + mlp(ln2(x))\n attn_output = attn_output + hidden_states\n mlp_output = self.mlp(self.post_attention_layernorm(attn_output))\n hidden_states = mlp_output + attn_output\n\n if use_cache:\n outputs = (hidden_states,) + outputs # hidden_states, present, (attn_weights)\n else:\n outputs = (hidden_states,) + outputs[1:] # hidden_states, (attn_weights)\n\n return outputs" }, { "identifier": "GPTNeoXAttention", "path": "model/gpt_neox/modeling_gpt_neox.py", "snippet": "class GPTNeoXAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.num_attention_heads = config.num_attention_heads\n self.hidden_size = config.hidden_size\n if self.hidden_size % self.num_attention_heads != 0:\n raise ValueError(\n \"The hidden size is not divisble by the number of attention heads! Make sure to update them\"\n )\n self.head_size = self.hidden_size // self.num_attention_heads\n self.rotary_ndims = int(self.head_size * config.rotary_pct)\n self._init_bias(config.max_position_embeddings)\n self.register_buffer(\"masked_bias\", torch.tensor(-1e9), persistent=False)\n self._init_rope()\n self.register_buffer(\n \"norm_factor\",\n torch.sqrt(torch.tensor(self.head_size, dtype=torch.float32)).to(torch.get_default_dtype()),\n persistent=False,\n )\n self.query_key_value = nn.Linear(config.hidden_size, 3 * config.hidden_size)\n self.dense = nn.Linear(config.hidden_size, config.hidden_size)\n\n def _init_bias(self, max_positions, device=None):\n self.register_buffer(\n \"bias\",\n torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view(\n 1, 1, max_positions, max_positions\n ),\n persistent=False,\n )\n if device is not None:\n self.bias = self.bias.to(device)\n\n def _init_rope(self):\n if self.config.rope_scaling is None:\n self.rotary_emb = GPTNeoXRotaryEmbedding(\n self.rotary_ndims, self.config.max_position_embeddings, base=self.config.rotary_emb_base\n )\n else:\n scaling_type = self.config.rope_scaling[\"type\"]\n scaling_factor = self.config.rope_scaling[\"factor\"]\n if scaling_type == \"linear\":\n self.rotary_emb = GPTNeoXLinearScalingRotaryEmbedding(\n self.rotary_ndims,\n self.config.max_position_embeddings,\n base=self.config.rotary_emb_base,\n scaling_factor=scaling_factor,\n )\n elif scaling_type == \"dynamic\":\n self.rotary_emb = GPTNeoXDynamicNTKScalingRotaryEmbedding(\n self.rotary_ndims,\n self.config.max_position_embeddings,\n base=self.config.rotary_emb_base,\n scaling_factor=scaling_factor,\n )\n else:\n raise ValueError(f\"Unknown RoPE scaling type {scaling_type}\")\n\n def forward(\n self,\n hidden_states: torch.FloatTensor,\n attention_mask: torch.FloatTensor,\n position_ids: torch.LongTensor,\n head_mask: Optional[torch.FloatTensor] = None,\n layer_past: Optional[Tuple[torch.Tensor]] = None,\n use_cache: Optional[bool] = False,\n output_attentions: Optional[bool] = False,\n ):\n has_layer_past = layer_past is not None\n\n # Compute QKV\n # Attention heads [batch, seq_len, hidden_size]\n # --> [batch, seq_len, (np * 3 * head_size)]\n qkv = self.query_key_value(hidden_states)\n\n # [batch, seq_len, (num_heads * 3 * head_size)]\n # --> [batch, seq_len, num_heads, 3 * head_size]\n new_qkv_shape = qkv.size()[:-1] + (self.num_attention_heads, 3 * self.head_size)\n qkv = qkv.view(*new_qkv_shape)\n\n # [batch, seq_len, num_attention_heads, 3 * head_size] --> 3 
[batch, num_attention_heads, seq_len, head_size]\n query = qkv[..., : self.head_size].permute(0, 2, 1, 3)\n t_layer = qkv[..., self.head_size : 2 * self.head_size].permute(0, 2, 1, 3)\n value = qkv[..., 2 * self.head_size :].permute(0, 2, 1, 3)\n\n t_layer_1 = t_layer[..., : t_layer.shape[-1] // 2]\n t_layer_2 = t_layer[..., t_layer.shape[-1] // 2 :]\n t_layer = (t_layer_1+t_layer_2)/2\n\n t_layer = F.relu(t_layer)\n\n t_layer = torch.cat((t_layer, t_layer), dim=-1)\n\n # Compute rotary embeddings on rotary_ndims\n query_rot = query[..., : self.rotary_ndims]\n query_pass = query[..., self.rotary_ndims :]\n t_rot = t_layer[..., : self.rotary_ndims]\n t_pass = t_layer[..., self.rotary_ndims :]\n\n # Compute token offset for rotary embeddings (when decoding)\n seq_len = t_layer.shape[-2]\n if has_layer_past:\n seq_len += layer_past[0].shape[-2]\n cos, sin = self.rotary_emb(value, seq_len=seq_len)\n query_rot, t_layer = apply_rotary_pos_emb(query_rot, t_rot, cos, sin, position_ids)\n query_rot = torch.cat((query_rot, query_pass), dim=-1)\n t_layer = torch.cat((t_layer, t_pass), dim=-1)\n\n # Cache QKV values\n if has_layer_past:\n past_t = layer_past[0]\n past_value = layer_past[1]\n t_layer = torch.cat((past_t, t_layer), dim=-2)\n value = torch.cat((past_value, value), dim=-2)\n present = (t_layer, value) if use_cache else None\n\n # Compute attention\n attn_output, attn_weights = self._attn(query, t_layer, query_rot, value, attention_mask, head_mask)\n\n # Reshape outputs\n attn_output = self._merge_heads(attn_output, self.num_attention_heads, self.head_size)\n attn_output = self.dense(attn_output)\n\n outputs = (attn_output, present)\n if output_attentions:\n outputs += (attn_weights,)\n\n return outputs\n\n @classmethod\n def _split_heads(cls, tensor, num_attention_heads, attn_head_size):\n \"\"\"\n Splits hidden dim into attn_head_size and num_attention_heads\n \"\"\"\n # tensor: [bs, seq_len, hidden_size]\n new_shape = tensor.size()[:-1] + (num_attention_heads, attn_head_size)\n # -> [bs, seq_len, num_attention_heads, attn_head_size]\n tensor = tensor.view(new_shape)\n # -> [bs, num_attention_heads, seq_len, attn_head_size]\n tensor = tensor.permute(0, 2, 1, 3)\n return tensor\n\n @classmethod\n def _merge_heads(cls, tensor, num_attention_heads, attn_head_size):\n \"\"\"\n Merges attn_head_size dim and num_attn_heads dim into hidden dim\n \"\"\"\n # tensor [bs, num_attention_heads, seq_len, attn_head_size]\n tensor = tensor.permute(0, 2, 1, 3).contiguous()\n # -> [bs, seq_len, num_attention_heads, attn_head_size]\n tensor = tensor.view(tensor.size(0), tensor.size(1), num_attention_heads * attn_head_size)\n # -> [bs, seq_len, hidden_size]\n return tensor\n\n def _attn(self, query, t_layer, query_rot, value, attention_mask=None, head_mask=None):\n # q, k, v: [bs, num_attention_heads, seq_len, attn_head_size]\n # compute causal mask from causal mask buffer\n batch_size, num_attention_heads, query_length, attn_head_size = query.size()\n key_length = t_layer.size(-2)\n\n # dynamically increase the causal mask with the key length, if needed.\n if key_length > self.bias.shape[-1]:\n self._init_bias(key_length, device=t_layer.device)\n causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length]\n\n # query = query.view(batch_size * num_attention_heads, query_length, attn_head_size)\n # key = key.view(batch_size * num_attention_heads, key_length, attn_head_size)\n # attn_scores = torch.zeros(\n # batch_size * num_attention_heads,\n # query_length,\n # key_length,\n # 
dtype=query.dtype,\n # device=key.device,\n # )\n # attn_scores = torch.baddbmm(\n # attn_scores,\n # query,\n # key.transpose(1, 2),\n # beta=1.0,\n # alpha=(torch.tensor(1.0, dtype=self.norm_factor.dtype, device=self.norm_factor.device) / self.norm_factor),\n # )\n # attn_scores = attn_scores.view(batch_size, num_attention_heads, query_length, key_length)\n\n # print(query.shape)\n # print(t_layer.shape)\n # print(query_rot.shape)\n\n attn_scores = contract(\n # 'nbpd,sbpd,nbpd->bpns',\n 'bpnd,bpsd,bpnd->bpns',\n query, # [sq, b, np, hn] [b,np,sq,hn]\n t_layer, #[sk, b, np, hn] [b,np,sk,hn]\n query_rot, # [sq, b, np, hn] [b,np,sq,hn]\n backend='torch'\n ) / self.norm_factor\n\n mask_value = torch.finfo(attn_scores.dtype).min\n # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.\n # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`\n mask_value = torch.tensor(mask_value, dtype=attn_scores.dtype).to(attn_scores.device)\n attn_scores = torch.where(causal_mask, attn_scores, mask_value)\n\n if attention_mask is not None:\n # Apply the attention mask\n attn_scores = attn_scores + attention_mask\n\n attn_weights = nn.functional.softmax(attn_scores, dim=-1)\n attn_weights = attn_weights.to(value.dtype)\n\n # Mask heads if we want to\n if head_mask is not None:\n attn_weights = attn_weights * head_mask\n\n attn_output = torch.matmul(attn_weights, value)\n return attn_output, attn_weights" }, { "identifier": "GPTNeoXMLP", "path": "model/gpt_neox/modeling_gpt_neox.py", "snippet": "class GPTNeoXMLP(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.dense_h_to_4h = nn.Linear(config.hidden_size, config.intermediate_size)\n self.dense_4h_to_h = nn.Linear(config.intermediate_size, config.hidden_size)\n self.act = ACT2FN[config.hidden_act]\n\n def forward(self, hidden_states):\n hidden_states = self.dense_h_to_4h(hidden_states)\n hidden_states = self.act(hidden_states)\n hidden_states = self.dense_4h_to_h(hidden_states)\n return hidden_states" }, { "identifier": "LlamaDecoderLayer", "path": "model/llama/modeling_llama.py", "snippet": "class LlamaDecoderLayer(nn.Module):\n def __init__(self, config: LlamaConfig):\n super().__init__()\n self.hidden_size = config.hidden_size\n self.self_attn = LlamaAttention(config=config)\n self.mlp = LlamaMLP(config)\n self.input_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)\n self.post_attention_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_value: Optional[Tuple[torch.Tensor]] = None,\n output_attentions: Optional[bool] = False,\n use_cache: Optional[bool] = False,\n ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:\n \"\"\"\n Args:\n hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`\n attention_mask (`torch.FloatTensor`, *optional*): attention mask of size\n `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. 
See `attentions` under\n returned tensors for more detail.\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding\n (see `past_key_values`).\n past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states\n \"\"\"\n\n residual = hidden_states\n\n hidden_states = self.input_layernorm(hidden_states)\n\n # Self Attention\n hidden_states, self_attn_weights, present_key_value = self.self_attn(\n hidden_states=hidden_states,\n attention_mask=attention_mask,\n position_ids=position_ids,\n past_key_value=past_key_value,\n output_attentions=output_attentions,\n use_cache=use_cache,\n )\n hidden_states = residual + hidden_states\n\n # Fully Connected\n residual = hidden_states\n hidden_states = self.post_attention_layernorm(hidden_states)\n hidden_states = self.mlp(hidden_states)\n hidden_states = residual + hidden_states\n\n outputs = (hidden_states,)\n\n if output_attentions:\n outputs += (self_attn_weights,)\n\n if use_cache:\n outputs += (present_key_value,)\n\n return outputs" }, { "identifier": "LlamaAttention", "path": "model/llama/modeling_llama.py", "snippet": "class LlamaAttention(nn.Module):\n \"\"\"Multi-headed attention from 'Attention Is All You Need' paper\"\"\"\n\n def __init__(self, config: LlamaConfig):\n super().__init__()\n self.config = config\n self.hidden_size = config.hidden_size\n self.num_heads = config.num_attention_heads\n self.head_dim = self.hidden_size // self.num_heads\n self.num_key_value_heads = config.num_key_value_heads\n self.num_key_value_groups = self.num_heads // self.num_key_value_heads\n self.max_position_embeddings = config.max_position_embeddings\n\n #20230803 T需要保持非负\n self.relu = ACT2FN['relu']\n\n if (self.head_dim * self.num_heads) != self.hidden_size:\n raise ValueError(\n f\"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}\"\n f\" and `num_heads`: {self.num_heads}).\"\n )\n self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)\n #20230803 K改为T\n self.t_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)\n # self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)\n self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)\n self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)\n self._init_rope()\n\n def _init_rope(self):\n if self.config.rope_scaling is None:\n self.rotary_emb = LlamaRotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings)\n else:\n scaling_type = self.config.rope_scaling[\"type\"]\n scaling_factor = self.config.rope_scaling[\"factor\"]\n if scaling_type == \"linear\":\n self.rotary_emb = LlamaLinearScalingRotaryEmbedding(\n self.head_dim, max_position_embeddings=self.max_position_embeddings, scaling_factor=scaling_factor\n )\n elif scaling_type == \"dynamic\":\n self.rotary_emb = LlamaDynamicNTKScalingRotaryEmbedding(\n self.head_dim, max_position_embeddings=self.max_position_embeddings, scaling_factor=scaling_factor\n )\n else:\n raise ValueError(f\"Unknown RoPE scaling type {scaling_type}\")\n\n def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):\n return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: 
Optional[torch.LongTensor] = None,\n past_key_value: Optional[Tuple[torch.Tensor]] = None,\n output_attentions: bool = False,\n use_cache: bool = False,\n ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:\n bsz, q_len, _ = hidden_states.size()\n\n # todo tp>1\n if self.config.pretraining_tp > 1:\n key_value_slicing = (self.num_key_value_heads * self.head_dim) // self.config.pretraining_tp\n query_slices = self.q_proj.weight.split(\n (self.num_heads * self.head_dim) // self.config.pretraining_tp, dim=0\n )\n key_slices = self.k_proj.weight.split(key_value_slicing, dim=0)\n value_slices = self.v_proj.weight.split(key_value_slicing, dim=0)\n\n query_states = [F.linear(hidden_states, query_slices[i]) for i in range(self.config.pretraining_tp)]\n query_states = torch.cat(query_states, dim=-1)\n\n key_states = [F.linear(hidden_states, key_slices[i]) for i in range(self.config.pretraining_tp)]\n key_states = torch.cat(key_states, dim=-1)\n\n value_states = [F.linear(hidden_states, value_slices[i]) for i in range(self.config.pretraining_tp)]\n value_states = torch.cat(value_states, dim=-1)\n\n else:\n query_states = self.q_proj(hidden_states)\n #20230803 K改为T\n t_states = self.t_proj(hidden_states)\n # key_states = self.k_proj(hidden_states)\n value_states = self.v_proj(hidden_states)\n\n query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)\n\n #20230803 T的定义\n t_states = t_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)\n t_states_1 = t_states[..., : t_states.shape[-1] // 2]\n t_states_2 = t_states[..., t_states.shape[-1] // 2 :]\n t_states = (t_states_1+t_states_2)/2\n t_states = F.relu(t_states)\n t_states = torch.cat((t_states, t_states), dim=-1)\n\n # key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)\n value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)\n\n kv_seq_len = t_states.shape[-2]\n if past_key_value is not None:\n kv_seq_len += past_key_value[0].shape[-2]\n cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)\n query_rot, t_states = apply_rotary_pos_emb(query_states, t_states, cos, sin, position_ids)\n\n if past_key_value is not None:\n # reuse k, v, self_attention\n t_states = torch.cat([past_key_value[0], t_states], dim=2)\n value_states = torch.cat([past_key_value[1], value_states], dim=2)\n\n past_key_value = (t_states, value_states) if use_cache else None\n\n # repeat k/v heads if n_kv_heads < n_heads\n t_states = repeat_kv(t_states, self.num_key_value_groups)\n value_states = repeat_kv(value_states, self.num_key_value_groups)\n\n attn_weights = contract(\n 'bpnd,bpsd,bpnd->bpns',\n query_states, # [b,p,sq,d]\n t_states, # [b,p,sk,d]\n query_rot, # [b,p,sq,d]\n backend='torch'\n ) / math.sqrt(self.head_dim)\n # attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)\n\n if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):\n raise ValueError(\n f\"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is\"\n f\" {attn_weights.size()}\"\n )\n\n if attention_mask is not None:\n if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):\n raise ValueError(\n f\"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}\"\n )\n attn_weights = attn_weights + attention_mask\n\n # upcast attention to fp32\n attn_weights = nn.functional.softmax(attn_weights, dim=-1, 
dtype=torch.float32).to(query_states.dtype)\n attn_output = torch.matmul(attn_weights, value_states)\n\n if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):\n raise ValueError(\n f\"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is\"\n f\" {attn_output.size()}\"\n )\n\n attn_output = attn_output.transpose(1, 2).contiguous()\n attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)\n\n if self.config.pretraining_tp > 1:\n attn_output = attn_output.split(self.hidden_size // self.config.pretraining_tp, dim=2)\n o_proj_slices = self.o_proj.weight.split(self.hidden_size // self.config.pretraining_tp, dim=1)\n attn_output = sum([F.linear(attn_output[i], o_proj_slices[i]) for i in range(self.config.pretraining_tp)])\n else:\n attn_output = self.o_proj(attn_output)\n\n if not output_attentions:\n attn_weights = None\n\n return attn_output, attn_weights, past_key_value" }, { "identifier": "LlamaMLP", "path": "model/llama/modeling_llama.py", "snippet": "class LlamaMLP(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n self.hidden_size = config.hidden_size\n self.intermediate_size = config.intermediate_size\n self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)\n self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False)\n self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False)\n self.act_fn = ACT2FN[config.hidden_act]\n\n def forward(self, x):\n if self.config.pretraining_tp > 1:\n slice = self.intermediate_size // self.config.pretraining_tp\n gate_proj_slices = self.gate_proj.weight.split(slice, dim=0)\n up_proj_slices = self.up_proj.weight.split(slice, dim=0)\n down_proj_slices = self.down_proj.weight.split(slice, dim=1)\n\n gate_proj = torch.cat(\n [F.linear(x, gate_proj_slices[i]) for i in range(self.config.pretraining_tp)], dim=-1\n )\n up_proj = torch.cat([F.linear(x, up_proj_slices[i]) for i in range(self.config.pretraining_tp)], dim=-1)\n\n intermediate_states = (self.act_fn(gate_proj) * up_proj).split(slice, dim=2)\n down_proj = [\n F.linear(intermediate_states[i], down_proj_slices[i]) for i in range(self.config.pretraining_tp)\n ]\n down_proj = sum(down_proj)\n else:\n down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))\n\n return down_proj" }, { "identifier": "PeftModel", "path": "model/peft/modeling_peft.py", "snippet": "class AntPeftForCausalLM(PeftModelForCausalLM):\nclass AntPeftForEmbedding(PeftModel):\n def __init__(self, model, peft_config: PeftConfig, adapter_name: str = \"default\"):\n def set_route_id(self, route_id: int):\n def expand_external_router(self, path: str):\n def forward(\n self,\n input_ids=None,\n position_ids=None,\n attention_mask=None,\n inputs_embeds=None,\n labels=None,\n route_id: int = 0,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs,\n ):\n def save_pretrained(self, save_directory, **kwargs):\n def load_adapter(self, model_id, adapter_name, is_trainable=False, **kwargs):\n def from_pretrained(\n cls,\n model,\n model_id: str,\n adapter_name: str = \"default\",\n is_trainable: bool = False,\n resume_from_checkpoint: bool = False,\n **kwargs\n ):\n def __init__(self, model, peft_config: PeftConfig, adapter_name: str = \"default\"):\n def set_route_id(self, route_id: int):\n def expand_external_router(self, path: str):\n def forward(\n self,\n query_ids: torch.Tensor,\n query_position_ids: torch.Tensor = None,\n 
query_attention_mask: torch.Tensor = None,\n query_mask: torch.Tensor = None,\n passage_ids: torch.Tensor = None,\n passage_position_ids: torch.Tensor = None,\n passage_attention_mask: torch.Tensor = None,\n passage_mask: torch.Tensor = None,\n route_id: int = 0,\n inputs_embeds=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs,\n ):\n def save_pretrained(self, save_directory, **kwargs):\n def load_adapter(self, model_id, adapter_name, is_trainable=False, **kwargs):" } ]
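(Aside, not part of the dataset record: the three-operand attention score used by the modified LlamaAttention/GPTNeoXAttention snippets in the context list above, rewritten as a plain torch.einsum sketch. The shapes, tensor names, and the omission of the rotary embedding and causal mask are assumptions made for brevity; only the relu/mirroring of the T projection and the 'bpnd,bpsd,bpnd->bpns' contraction mirror the snippet itself.)

import math
import torch
import torch.nn.functional as F

b, h, s, d = 2, 4, 8, 16                 # batch, heads, sequence length, head dim
query     = torch.randn(b, h, s, d)      # query before rotary embedding
query_rot = torch.randn(b, h, s, d)      # stand-in for the rotary-embedded query
t_raw     = torch.randn(b, h, s, d)      # output of the T projection

# As in the snippet, T is averaged across its two halves, kept non-negative,
# then mirrored back to full head width.
t_half = (t_raw[..., : d // 2] + t_raw[..., d // 2 :]) / 2
t = torch.cat((F.relu(t_half), F.relu(t_half)), dim=-1)

# score[b, h, n, s] = sum_d query[b, h, n, d] * t[b, h, s, d] * query_rot[b, h, n, d]
scores = torch.einsum('bhnd,bhsd,bhnd->bhns', query, t, query_rot) / math.sqrt(d)
attn = torch.softmax(scores, dim=-1)     # causal mask omitted in this sketch
print(attn.shape)                        # torch.Size([2, 4, 8, 8])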
import datetime import json import logging import math import os import random import re import shutil import time import warnings import gc import numpy as np import atorch import torch from functools import partial from pathlib import Path from deepspeed.ops.adam import DeepSpeedCPUAdam from torch.distributed.fsdp import FullStateDictConfig from torch.distributed.fsdp import FullyShardedDataParallel as FSDP from torch.distributed.fsdp import StateDictType from torch.optim.lr_scheduler import LambdaLR, CosineAnnealingLR, CosineAnnealingWarmRestarts from torch.utils.data import DataLoader from torch.utils.data.distributed import DistributedSampler from torch.utils.tensorboard import SummaryWriter from tqdm.auto import tqdm from transformers import get_scheduler as get_scheduler_trans from transformers.modeling_utils import PreTrainedModel, unwrap_model from transformers.trainer import ( OPTIMIZER_NAME, SCHEDULER_NAME, TRAINER_STATE_NAME, TRAINING_ARGS_NAME ) from transformers.trainer_pt_utils import reissue_pt_warnings from transformers.trainer_utils import ( PREFIX_CHECKPOINT_DIR, ) from transformers.utils import WEIGHTS_NAME from torch.nn import CrossEntropyLoss from utils.common_utils import print_rank_0, get_tflops_megatron, get_computation_speed, TASK2ID, ID2TASK, EarlyStopping, logger from utils.auto_accelerate_utils import FAMO, get_ltor_masks_and_position_ids, SelfPacedStatus from atorch.auto import auto_accelerate from atorch.utils.version import torch_version from model.gpt_neox.modeling_gpt_neox import GPTNeoXLayer, GPTNeoXAttention, GPTNeoXMLP from model.llama.modeling_llama import LlamaDecoderLayer, LlamaAttention, LlamaMLP from model.glm.modeling_glm import GLMBlock from torch.cuda.amp import GradScaler from apex.optimizers import FusedSGD from model.peft.modeling_peft import PeftModel
11,705
self._load_rng_state(self.resume_checkpoint_dir) torch.distributed.barrier() now_datetime = datetime.datetime.now() timestr = datetime.datetime.strftime(now_datetime, '%Y%m%d-%H%M%S') self.log_dir = os.path.join(self.args.output_dir, 'runs', timestr) self.summary_writer = None if torch.distributed.get_rank() == 0: self.summary_writer = SummaryWriter(log_dir=self.log_dir) def get_last_checkpoint(self, folder): _re_checkpoint = re.compile(r"^" + PREFIX_CHECKPOINT_DIR + r"\-(\d+)") content = sorted(os.listdir(folder)) checkpoints = [ path for path in content if _re_checkpoint.search(path) is not None and os.path.isdir(os.path.join(folder, path)) ] if len(checkpoints) == 0: return return os.path.join(folder, max(checkpoints, key=lambda x: int(_re_checkpoint.search(x).groups()[0]))) def _load_rng_state(self, resume_checkpoint_dir): # Load RNG states from `checkpoint` if resume_checkpoint_dir is None: return if self.args.world_size > 1: rng_file = os.path.join( resume_checkpoint_dir, f"rng_state_{self.rank}.pth") if not os.path.isfile(rng_file): logger.info( f"Didn't find an RNG file for process {self.rnak}, if you are resuming a training that " "wasn't launched in a distributed fashion, reproducibility is not guaranteed." ) return else: rng_file = os.path.join(resume_checkpoint_dir, "rng_state.pth") if not os.path.isfile(rng_file): logger.info( "Didn't find an RNG file, if you are resuming a training that was launched in a distributed " "fashion, reproducibility is not guaranteed." ) return checkpoint_rng_state = torch.load(rng_file) random.setstate(checkpoint_rng_state["python"]) np.random.set_state(checkpoint_rng_state["numpy"]) torch.random.set_rng_state(checkpoint_rng_state["cpu"]) if torch.cuda.is_available(): if self.args.local_rank != -1: torch.cuda.random.set_rng_state(checkpoint_rng_state["cuda"]) else: try: torch.cuda.random.set_rng_state_all( checkpoint_rng_state["cuda"]) except Exception as e: logger.info( f"Didn't manage to set back the RNG states of the GPU because of the following error:\n {e}" "\nThis won't yield the same results as if the training had not been interrupted." 
) def load_atorch_model_state(self, model_state_dict, **kwargs): print('resume atorch model state') if self.is_rank0(): self.model.load_state_dict(model_state_dict) # 在 rank 0 加载完毕后,再通过sync_module_states分发参数 torch.distributed.barrier() # self.model = FSDP(self.model, sync_module_states=True, **kwargs) def load_atorch_optim_state(self, optim_state_dict): print('resume optimizer state') optim_state_dict = FSDP.scatter_full_optim_state_dict( optim_state_dict, self.model) # may be removed after PyTorch 2.2 def move_optim_state_to_cpu(optim_state_dict): for k in optim_state_dict: if isinstance(optim_state_dict[k], torch.Tensor): optim_state_dict[k] = optim_state_dict[k].cpu() elif isinstance(optim_state_dict[k], dict): move_optim_state_to_cpu(optim_state_dict[k]) move_optim_state_to_cpu(optim_state_dict) self.optimizer.load_state_dict(optim_state_dict) def load_famo_state(self): print_rank_0(f'loading famo checkpoint') self.famo_resume = True famo_dir = os.path.join(self.resume_checkpoint_dir, 'famo_checkpoint/') if not os.path.exists(famo_dir): print_rank_0(f'can not find the famo checkpoint dir!') else: famo_state_name = FAMO_CHECKPOINT_NAME + f'_rank_{self.rank}.pth' famo_checkpoint_state = torch.load(os.path.join(famo_dir, famo_state_name)) w_opt_state = famo_checkpoint_state['w_opt_state'] self.famo.prev_train_loss = famo_checkpoint_state['prev_train_loss'].to(self.famo.device) self.famo.prev_valid_loss = famo_checkpoint_state['prev_valid_loss'].to(self.famo.device) self.famo.first_train_step = famo_checkpoint_state['first_train_step'] self.famo.first_valid_step = famo_checkpoint_state['first_valid_step'] self.famo.ratio_valid_task_loss_prev = famo_checkpoint_state['ratio_valid_task_loss_prev'].to(self.famo.device) self.famo.w = famo_checkpoint_state['w'].to(self.famo.device) self.famo.w_opt.load_state_dict(w_opt_state) print_rank_0(f'prev_train_loss: {self.famo.prev_train_loss}') print_rank_0(f'prev_valid_loss: {self.famo.prev_valid_loss}') print_rank_0(f'first_train_step: {self.famo.first_train_step}') print_rank_0(f'first_valid_step: {self.famo.first_valid_step}') print_rank_0(f'ratio_valid_task_loss_prev: {self.famo.ratio_valid_task_loss_prev}') print_rank_0(f'w: {self.famo.w}') print_rank_0(f'load famo checkpoint successfully') def atorch_init(self): assert torch_version() >= (2, 0, 0), "use pt2.0 for use orig param if fsdp" if self.args.model_type == 'gpt_neox': # wrap_class = (GPTNeoXAttention, GPTNeoXMLP) wrap_class = (GPTNeoXLayer,) elif self.args.model_type == 'llama': # wrap_class = (LlamaAttention, LlamaMLP)
#!/usr/bin/env python # coding=utf-8 # Copyright (c) 2023 Ant Group. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. HYPER_PARAMETER_NAME = 'hyper_parameters.json' ATORCH_CHECKPOINT_NAME = 'atorch_checkpoint.bin' EPOCH_CHECKPOINT_NAME = 'epoch' FAMO_CHECKPOINT_NAME = 'famo_checkpoint' EMA_CHECKPOINT_NAME = 'ema_checkpoint' # logger = logging.getLogger(__name__) def is_local_main_process(): return atorch.local_rank() == 0 def is_global_main_process(): return atorch.rank() == 0 def has_inf_or_nan(x): try: # if x is half, the .float() incurs an additional deep copy, but it's necessary if # Pytorch's .sum() creates a one-element tensor of the same type as x # (which is true for some recent version of pytorch). cpu_sum = float(x.float().sum()) # More efficient version that can be used if .sum() returns a Python scalar # cpu_sum = float(x.sum()) except RuntimeError as instance: # We want to check if inst is actually an overflow exception. # RuntimeError could come from a different error. # If so, we still want the exception to propagate. if "value cannot be converted" not in instance.args[0]: raise return True else: if cpu_sum == float('inf') or cpu_sum == -float('inf') or cpu_sum != cpu_sum: return True return False def count_model_params(model): trainable_params = 0 all_params = 0 for param in model.parameters(): num_params = param.numel() all_params += num_params if param.requires_grad: trainable_params += num_params return all_params, trainable_params class AtorchArguments: def __init__(self, **kwargs): for key, value in kwargs.items(): setattr(self, key, value) def get_linear_schedule_with_log_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1): def lr_lambda(current_step: int): inverse_log_warm_up = 1.0 / math.log(num_warmup_steps) if current_step == 0: return 0.0 if current_step < num_warmup_steps: return inverse_log_warm_up * math.log(current_step) return max( 0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)) ) return LambdaLR(optimizer, lr_lambda, last_epoch) def get_scheduler(name, optimizer, num_warmup_steps, num_training_steps): scheduler_map = { 'log_warmup_linear_decay': get_linear_schedule_with_log_warmup} try: lr_scheduler = get_scheduler_trans( name, optimizer, num_warmup_steps, num_training_steps) return lr_scheduler except Exception: schedule_func = scheduler_map[name] return schedule_func(optimizer, num_warmup_steps, num_training_steps) class AtorchTrainer: def __init__(self, model, args, train_dataset, valid_dataset, tokenizer=None, callbacks=None, no_save_atorch_checkpoint=None, save_pytorch_model_bin_checkpoint=True, train_peft=False, rank=0, max_shard_size='10GB', files_to_save=None, args_to_save=None, data_collator=None, my_loss_func=None, **kwargs, ): self.args = args self.TASK2ID = TASK2ID self.ID2TASK = ID2TASK print('in atorch trainer') print(TASK2ID) print(ID2TASK) self.model = model self.no_save_atorch_checkpoint = no_save_atorch_checkpoint self.save_pytorch_model_bin_checkpoint = 
save_pytorch_model_bin_checkpoint self.train_peft = train_peft self.rank = rank self.kwargs = kwargs self.train_dataset = train_dataset self.valid_dataset = valid_dataset self.tokenizer = tokenizer self.max_shard_size = max_shard_size self.files_to_save = files_to_save self.args_to_save = args_to_save self.best_metric = None self.best_model_checkpoint = None self.no_save_base_model = True self.device = f"cuda:{atorch.local_rank()}" self.famo = FAMO(n_tasks=len(TASK2ID), device=self.device, mode=self.args.weighted_loss_mode) self.famo_resume = False self.selfpaced_status = SelfPacedStatus(args.selfpaced_interval) self.total_train_batch_size = self.args.per_device_train_batch_size * \ self.args.gradient_accumulation_steps * \ atorch.world_size() self.data_collator = data_collator self.my_loss_func = my_loss_func if self.args.early_stopping_patience > 0: print(f'early_stopping_patience: {self.args.early_stopping_patience}') patience = self.args.early_stopping_patience self.early_stopping = EarlyStopping(patience, verbose=True) self.train_dataloader_args = { "shuffle": True, "batch_size": self.total_train_batch_size, "pin_memory": True, "collate_fn": data_collator, "drop_last": True, "num_workers": self.args.num_workers, # "persistent_workers": args.num_workers > 0, } self.valid_dataloader = DataLoader( valid_dataset, sampler=DistributedSampler(valid_dataset, shuffle=True), batch_size=args.per_device_valid_batch_size, pin_memory=True, collate_fn=data_collator ) self.valid_dataloader_length = len(self.valid_dataloader) if self.args.resume_from_checkpoint == 'true': self.resume_checkpoint_dir = self.get_last_checkpoint( self.args.output_dir) self.atorch_args = AtorchArguments( lr=args.learning_rate, weight_decay=args.weight_decay, adam_eps=args.adam_epsilon, adam_beta1=args.adam_beta1, adam_beta2=args.adam_beta2) self.atorch_init() self.num_update_steps_per_epoch = math.ceil( len(self.train_dataloader) / self.args.gradient_accumulation_steps) print(f'number of update steps per epoch: {self.num_update_steps_per_epoch}') if self.args.max_steps == -1: self.args.max_steps = int( self.args.num_train_epochs * self.num_update_steps_per_epoch) else: self.args.num_train_epochs = math.ceil( self.args.max_steps / self.num_update_steps_per_epoch) # self.args.warmup_steps = self.args.get_warmup_steps( # self.args.max_steps) # 找不到get_warmup_steps custom_lr_scheduler_type = self.kwargs.get( 'custom_lr_scheduler_type', None) self.lr_scheduler = get_scheduler( name=custom_lr_scheduler_type if custom_lr_scheduler_type else self.args.lr_scheduler_type, optimizer=self.optimizer, num_warmup_steps=self.args.num_warmup_steps, num_training_steps=self.args.max_steps, ) print_rank_0(f'lr_scheduler{self.lr_scheduler}') if self.args.resume_from_checkpoint == 'true': with warnings.catch_warnings(record=True): self.lr_scheduler.load_state_dict(torch.load( os.path.join(self.resume_checkpoint_dir, SCHEDULER_NAME))) self._load_rng_state(self.resume_checkpoint_dir) torch.distributed.barrier() now_datetime = datetime.datetime.now() timestr = datetime.datetime.strftime(now_datetime, '%Y%m%d-%H%M%S') self.log_dir = os.path.join(self.args.output_dir, 'runs', timestr) self.summary_writer = None if torch.distributed.get_rank() == 0: self.summary_writer = SummaryWriter(log_dir=self.log_dir) def get_last_checkpoint(self, folder): _re_checkpoint = re.compile(r"^" + PREFIX_CHECKPOINT_DIR + r"\-(\d+)") content = sorted(os.listdir(folder)) checkpoints = [ path for path in content if _re_checkpoint.search(path) is not None and 
os.path.isdir(os.path.join(folder, path)) ] if len(checkpoints) == 0: return return os.path.join(folder, max(checkpoints, key=lambda x: int(_re_checkpoint.search(x).groups()[0]))) def _load_rng_state(self, resume_checkpoint_dir): # Load RNG states from `checkpoint` if resume_checkpoint_dir is None: return if self.args.world_size > 1: rng_file = os.path.join( resume_checkpoint_dir, f"rng_state_{self.rank}.pth") if not os.path.isfile(rng_file): logger.info( f"Didn't find an RNG file for process {self.rnak}, if you are resuming a training that " "wasn't launched in a distributed fashion, reproducibility is not guaranteed." ) return else: rng_file = os.path.join(resume_checkpoint_dir, "rng_state.pth") if not os.path.isfile(rng_file): logger.info( "Didn't find an RNG file, if you are resuming a training that was launched in a distributed " "fashion, reproducibility is not guaranteed." ) return checkpoint_rng_state = torch.load(rng_file) random.setstate(checkpoint_rng_state["python"]) np.random.set_state(checkpoint_rng_state["numpy"]) torch.random.set_rng_state(checkpoint_rng_state["cpu"]) if torch.cuda.is_available(): if self.args.local_rank != -1: torch.cuda.random.set_rng_state(checkpoint_rng_state["cuda"]) else: try: torch.cuda.random.set_rng_state_all( checkpoint_rng_state["cuda"]) except Exception as e: logger.info( f"Didn't manage to set back the RNG states of the GPU because of the following error:\n {e}" "\nThis won't yield the same results as if the training had not been interrupted." ) def load_atorch_model_state(self, model_state_dict, **kwargs): print('resume atorch model state') if self.is_rank0(): self.model.load_state_dict(model_state_dict) # 在 rank 0 加载完毕后,再通过sync_module_states分发参数 torch.distributed.barrier() # self.model = FSDP(self.model, sync_module_states=True, **kwargs) def load_atorch_optim_state(self, optim_state_dict): print('resume optimizer state') optim_state_dict = FSDP.scatter_full_optim_state_dict( optim_state_dict, self.model) # may be removed after PyTorch 2.2 def move_optim_state_to_cpu(optim_state_dict): for k in optim_state_dict: if isinstance(optim_state_dict[k], torch.Tensor): optim_state_dict[k] = optim_state_dict[k].cpu() elif isinstance(optim_state_dict[k], dict): move_optim_state_to_cpu(optim_state_dict[k]) move_optim_state_to_cpu(optim_state_dict) self.optimizer.load_state_dict(optim_state_dict) def load_famo_state(self): print_rank_0(f'loading famo checkpoint') self.famo_resume = True famo_dir = os.path.join(self.resume_checkpoint_dir, 'famo_checkpoint/') if not os.path.exists(famo_dir): print_rank_0(f'can not find the famo checkpoint dir!') else: famo_state_name = FAMO_CHECKPOINT_NAME + f'_rank_{self.rank}.pth' famo_checkpoint_state = torch.load(os.path.join(famo_dir, famo_state_name)) w_opt_state = famo_checkpoint_state['w_opt_state'] self.famo.prev_train_loss = famo_checkpoint_state['prev_train_loss'].to(self.famo.device) self.famo.prev_valid_loss = famo_checkpoint_state['prev_valid_loss'].to(self.famo.device) self.famo.first_train_step = famo_checkpoint_state['first_train_step'] self.famo.first_valid_step = famo_checkpoint_state['first_valid_step'] self.famo.ratio_valid_task_loss_prev = famo_checkpoint_state['ratio_valid_task_loss_prev'].to(self.famo.device) self.famo.w = famo_checkpoint_state['w'].to(self.famo.device) self.famo.w_opt.load_state_dict(w_opt_state) print_rank_0(f'prev_train_loss: {self.famo.prev_train_loss}') print_rank_0(f'prev_valid_loss: {self.famo.prev_valid_loss}') print_rank_0(f'first_train_step: {self.famo.first_train_step}') 
print_rank_0(f'first_valid_step: {self.famo.first_valid_step}') print_rank_0(f'ratio_valid_task_loss_prev: {self.famo.ratio_valid_task_loss_prev}') print_rank_0(f'w: {self.famo.w}') print_rank_0(f'load famo checkpoint successfully') def atorch_init(self): assert torch_version() >= (2, 0, 0), "use pt2.0 for use orig param if fsdp" if self.args.model_type == 'gpt_neox': # wrap_class = (GPTNeoXAttention, GPTNeoXMLP) wrap_class = (GPTNeoXLayer,) elif self.args.model_type == 'llama': # wrap_class = (LlamaAttention, LlamaMLP)
next_line: wrap_class = (LlamaDecoderLayer,)
gold_snippet_index: 7
created_at: 2023-11-02 01:37:01+00:00
level: 16k
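The AtorchTrainer row above resolves `resume_from_checkpoint` by scanning the output directory for `checkpoint-<step>` folders and loading the one with the largest step (its `get_last_checkpoint` method). Below is a minimal, self-contained sketch of that lookup pattern; the value of `PREFIX_CHECKPOINT_DIR` is not shown in the row, so the `"checkpoint"` prefix here is an assumption based on the common transformers convention.

```python
import os
import re

# Assumed prefix; the quoted trainer code imports PREFIX_CHECKPOINT_DIR from elsewhere.
PREFIX_CHECKPOINT_DIR = "checkpoint"
_re_checkpoint = re.compile(r"^" + PREFIX_CHECKPOINT_DIR + r"-(\d+)$")


def get_last_checkpoint(folder: str):
    """Return the checkpoint-<N> subdirectory with the largest N, or None if absent."""
    candidates = [
        name
        for name in sorted(os.listdir(folder))
        if _re_checkpoint.search(name) is not None and os.path.isdir(os.path.join(folder, name))
    ]
    if not candidates:
        return None
    latest = max(candidates, key=lambda name: int(_re_checkpoint.search(name).group(1)))
    return os.path.join(folder, latest)
```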
repo_name: bytedance/cryostar
file_path: projects/star/train_density.py
[ { "identifier": "StarfileDataSet", "path": "cryostar/utils/dataio.py", "snippet": "class StarfileDataSet(Dataset):\n\n def __init__(self, cfg: StarfileDatasetConfig):\n super().__init__()\n self.cfg = cfg\n self.df = starfile.read(Path(cfg.starfile_path))\n\n if \"optics\" in self.df:\n optics_df = self.df[\"optics\"]\n particles_df = self.df[\"particles\"]\n else:\n optics_df = None\n particles_df = self.df\n self.particles_df = particles_df\n\n if cfg.apix is None:\n if optics_df is not None and \"rlnImagePixelSize\" in optics_df:\n self.apix = float(optics_df[\"rlnImagePixelSize\"][0])\n print(f\"Infer dataset apix={self.apix} from first optic group.\")\n elif \"rlnDetectorPixelSize\" in particles_df and \"rlnMagnification\" in particles_df:\n self.apix = float(particles_df[\"rlnDetectorPixelSize\"][0] / particles_df[\"rlnMagnification\"][0] * 1e4)\n print(f\"Infer dataset apix={self.apix} from first particle meta data.\")\n else:\n raise AttributeError(\"Cannot parse apix from starfile, please set it in config by hand.\")\n else:\n self.apix = cfg.apix\n\n if cfg.side_shape is None:\n tmp_mrc_path = osp.join(cfg.dataset_dir, particles_df[\"rlnImageName\"][0].split('@')[-1])\n with mrcfile.mmap(tmp_mrc_path, mode=\"r\", permissive=True) as m:\n self.side_shape = m.data.shape[-1]\n print(f\"Infer dataset side_shape={self.side_shape} from the 1st particle.\")\n else:\n self.side_shape = cfg.side_shape\n\n self.num_proj = len(particles_df)\n\n self.down_side_shape = self.side_shape\n if cfg.down_side_shape is not None:\n self.down_side_shape = cfg.down_side_shape\n\n if cfg.mask_rad is not None:\n self.mask = Mask(self.down_side_shape, cfg.mask_rad)\n\n self.f_mu = None\n self.f_std = None\n\n def __len__(self):\n return self.num_proj\n\n def estimate_normalization(self):\n if self.f_mu is None and self.f_std is None:\n f_sub_data = []\n # I have checked that the standard deviation of 10/100/1000 particles is similar\n for i in range(0, len(self), len(self) // 100):\n f_sub_data.append(self[i][\"fproj\"])\n f_sub_data = torch.cat(f_sub_data, dim=0)\n # self.f_mu = torch.mean(f_sub_data)\n self.f_mu = 0.0 # just follow cryodrgn\n self.f_std = torch.std(f_sub_data).item()\n else:\n raise Exception(\"The normalization factor has been estimated!\")\n\n def __getitem__(self, idx):\n item_row = self.particles_df.iloc[idx]\n try:\n img_name_raw = item_row[\"rlnImageName\"]\n in_mrc_idx, img_name = item_row[\"rlnImageName\"].split(\"@\")\n in_mrc_idx = int(in_mrc_idx) - 1\n mrc_path = osp.join(self.cfg.dataset_dir, img_name)\n with mrcfile.mmap(mrc_path, mode=\"r\", permissive=True) as mrc:\n if mrc.data.ndim > 2:\n proj = torch.from_numpy(np.array(mrc.data[in_mrc_idx])).float() * self.cfg.scale_images\n else:\n # the mrcs file can contain only one particle\n proj = torch.from_numpy(np.array(mrc.data)).float() * self.cfg.scale_images\n\n # get (1, side_shape, side_shape) proj\n if len(proj.shape) == 2:\n proj = proj[None, :, :] # add a dummy channel (for consistency w/ img fmt)\n else:\n assert len(proj.shape) == 3 and proj.shape[0] == 1 # some starfile already have a dummy channel\n\n # down-sample\n if self.down_side_shape != self.side_shape:\n if self.cfg.down_method == \"interp\":\n proj = tvf.resize(proj, [self.down_side_shape, ] * 2, antialias=True)\n elif self.cfg.down_method == \"fft\":\n proj = downsample_2d(proj[0, :, :], self.down_side_shape)[None, :, :]\n else:\n raise NotImplementedError\n\n if self.cfg.mask_rad is not None:\n proj = self.mask(proj)\n\n except Exception as e:\n 
print(f\"WARNING: Particle image {img_name_raw} invalid! Setting to zeros.\")\n print(e)\n proj = torch.zeros(1, self.down_side_shape, self.down_side_shape)\n\n if self.cfg.power_images != 1.0:\n proj *= self.cfg.power_images\n\n # Generate CTF from CTF paramaters\n defocusU = torch.from_numpy(np.array(item_row[\"rlnDefocusU\"] / 1e4, ndmin=2)).float()\n defocusV = torch.from_numpy(np.array(item_row[\"rlnDefocusV\"] / 1e4, ndmin=2)).float()\n angleAstigmatism = torch.from_numpy(np.radians(np.array(item_row[\"rlnDefocusAngle\"], ndmin=2))).float()\n\n # Read \"GT\" orientations\n if self.cfg.ignore_rots:\n rotmat = torch.eye(3).float()\n else:\n # yapf: disable\n rotmat = torch.from_numpy(euler_angles2matrix(\n np.radians(-item_row[\"rlnAngleRot\"]),\n # np.radians(particle[\"rlnAngleTilt\"]) * (-1 if self.cfg.invert_hand else 1),\n np.radians(-item_row[\"rlnAngleTilt\"]),\n np.radians(-item_row[\"rlnAnglePsi\"]))\n ).float()\n # yapf: enable\n\n # Read \"GT\" shifts\n if self.cfg.ignore_trans:\n shiftX = torch.tensor([0.])\n shiftY = torch.tensor([0.])\n else:\n # support early starfile formats\n # Particle translations used to be in pixels (rlnOriginX and rlnOriginY) but this changed to Angstroms\n # (rlnOriginXAngstrom and rlnOriginYAngstrom) in relion 3.1.\n # https://relion.readthedocs.io/en/release-3.1/Reference/Conventions.html\n if \"rlnOriginXAngst\" in item_row:\n shiftX = torch.from_numpy(np.array(item_row[\"rlnOriginXAngst\"], dtype=np.float32))\n shiftY = torch.from_numpy(np.array(item_row[\"rlnOriginYAngst\"], dtype=np.float32))\n else:\n shiftX = torch.from_numpy(np.array(item_row[\"rlnOriginX\"] * self.apix, dtype=np.float32))\n shiftY = torch.from_numpy(np.array(item_row[\"rlnOriginY\"] * self.apix, dtype=np.float32))\n\n fproj = primal_to_fourier_2d(proj)\n\n if self.f_mu is not None:\n fproj = (fproj - self.f_mu) / self.f_std\n proj = fourier_to_primal_2d(fproj).real\n\n in_dict = {\n \"proj\": proj,\n \"rotmat\": rotmat,\n \"defocusU\": defocusU,\n \"defocusV\": defocusV,\n \"shiftX\": shiftX,\n \"shiftY\": shiftY,\n \"angleAstigmatism\": angleAstigmatism,\n \"idx\": torch.tensor(idx, dtype=torch.long),\n \"fproj\": fproj,\n \"imgname_raw\": img_name_raw\n }\n\n if \"rlnClassNumber\" in item_row:\n in_dict[\"class_id\"] = item_row[\"rlnClassNumber\"]\n\n return in_dict" }, { "identifier": "StarfileDatasetConfig", "path": "cryostar/utils/dataio.py", "snippet": "class StarfileDatasetConfig:\n dataset_dir: str\n starfile_path: str\n # if is not specified, the following apix, and side_shape will be inferred from starfile\n apix: float = None\n side_shape: int = None\n # down-sample the original image or not\n down_side_shape: int = None\n down_method: str = \"interp\"\n # apply a circular mask on input image or not\n mask_rad: float = None\n # change image values\n scale_images: float = 1.0\n power_images: float = field(\n default=1.0,\n metadata={\"help\": \"Change the power of the signal by multiplying a constant number.\"})\n # ignore pose from starfile or not\n ignore_trans: bool = False\n ignore_rots: bool = False\n # invert_hand: bool = field(\n # default=False,\n # metadata={\"help\": \"Invert handedness when reading relion data.\"})" }, { "identifier": "ImplicitFourierVolume", "path": "cryostar/nerf/volume_utils.py", "snippet": "class ImplicitFourierVolume(nn.Module):\n\n def __init__(self, z_dim, img_sz, mask_rad, params_implicit):\n \"\"\"\n Initialization of an implicit representation of the volume in Fourier space.\n\n Parameters\n ----------\n img_sz: int\n 
params_implicit: dictionary\n \"\"\"\n super().__init__()\n self.img_sz = img_sz\n self.z_dim = z_dim\n\n lincoords = torch.linspace(-1., 1., self.img_sz)\n [X, Y] = torch.meshgrid([lincoords, lincoords], indexing=\"ij\")\n coords = torch.stack([Y, X, torch.zeros_like(X)], dim=-1)\n coords = shift_coords(coords, 1., 1., 0, img_sz, img_sz, 1)\n self.register_buffer('plane_coords', coords.reshape(-1, 3))\n\n self.mask_rad = mask_rad\n if self.mask_rad != 1:\n mask = create_circular_mask(img_sz, img_sz, None, self.mask_rad / 2 * img_sz)\n plane_window_mask = torch.from_numpy(mask).reshape(-1)\n self.register_buffer('plane_window_mask', plane_window_mask)\n sphere_mask = torch.from_numpy(\n create_sphere_mask(self.img_sz, self.img_sz, self.img_sz, radius=self.mask_rad / 2 * self.img_sz)\n )\n self.register_buffer(\"sphere_mask\", sphere_mask)\n\n lincoords = torch.linspace(-1., 1., self.img_sz)\n [X, Y, Z] = torch.meshgrid([lincoords, lincoords, lincoords], indexing=\"ij\")\n coords = torch.stack([Z, Y, X], dim=-1)\n coords = shift_coords(coords, 1., 1., 1., img_sz, img_sz, img_sz)\n self.register_buffer('coords_3d', coords.reshape(-1, 3))\n\n self.fvol = FourierNet(net_type=params_implicit[\"net_type\"],\n z_dim=z_dim,\n pe_dim=params_implicit[\"pe_dim\"],\n pe_type=params_implicit[\"pe_type\"],\n D=params_implicit[\"D\"],\n hidden_dim=params_implicit[\"hidden\"],\n force_symmetry=params_implicit['force_symmetry'])\n\n def forward(self, z, rotmat):\n \"\"\"\n Generates a slice in Fourier space from a rotation matrix.\n\n Parameters\n ----------\n rotmat: torch.Tensor (B, 3, 3)\n\n Returns\n -------\n fplane: torch.Tensor (B, 1, img_sz, img_sz) (complex)\n \"\"\"\n if self.z_dim == 0:\n assert z is None\n batch_sz = rotmat.shape[0]\n\n with torch.autocast(\"cuda\", enabled=False):\n assert self.plane_coords.dtype == torch.float32\n assert rotmat.dtype == torch.float32\n rot_plane_coords = torch.bmm(self.plane_coords.repeat(batch_sz, 1, 1), rotmat) # B, img_sz^2, 3\n\n if self.mask_rad != 1:\n coords_mask = einops.repeat(self.plane_window_mask, \"num_coords -> bsz num_coords c3\", bsz=batch_sz, c3=3)\n rot_plane_coords = rot_plane_coords[coords_mask].reshape(batch_sz, -1, 3) # B, mask_num, 3\n\n fplane = self.fvol(z, rot_plane_coords) # B, _, 1/2\n\n if self.mask_rad != 1:\n unmask_fplane = fplane.new_zeros(batch_sz, self.img_sz * self.img_sz, self.fvol.out_features)\n value_mask = einops.repeat(self.plane_window_mask, \"num_coords -> bsz num_coords c\", bsz=batch_sz, c=self.fvol.out_features)\n unmask_fplane[value_mask] = fplane.reshape(-1)\n fplane = unmask_fplane.reshape(batch_sz, self.img_sz, self.img_sz, self.fvol.out_features)\n else:\n fplane = fplane.reshape(batch_sz, self.img_sz, self.img_sz, self.fvol.out_features)\n\n if self.fvol.out_features == 2:\n fplane = torch.view_as_complex(fplane) # B, img_sz, img_sz\n else:\n fplane = batch_hartley_to_fourier_2d(fplane.squeeze(-1)) # B, img_sz, img_sz\n\n fplane = fplane[:, None, :, :]\n return fplane\n\n def make_volume(self, z):\n with torch.no_grad():\n with torch.autocast(\"cuda\", enabled=False):\n coords = self.coords_3d.unsqueeze(0)\n num_coords = coords.shape[1]\n chunk_size = 128**2 * 32\n exp_fvol = []\n for sid in range(0, num_coords, chunk_size):\n eid = sid + chunk_size\n exp_fvol.append(self.fvol(z, coords[:, sid:eid]))\n exp_fvol = torch.cat(exp_fvol, dim=1)\n if self.fvol.out_features == 2:\n exp_fvol = exp_fvol.reshape(self.img_sz, self.img_sz, self.img_sz, 2)\n exp_fvol = torch.view_as_complex(exp_fvol)\n else:\n exp_fvol 
= exp_fvol.reshape(self.img_sz, self.img_sz, self.img_sz)\n exp_fvol = hartley_to_fourier_3d(exp_fvol)\n\n exp_fvol[~self.sphere_mask] = 0.0\n exp_vol = fourier_to_primal_3d(exp_fvol).real\n return exp_vol" }, { "identifier": "SpatialGridTranslate", "path": "cryostar/utils/transforms.py", "snippet": "class SpatialGridTranslate(torch.nn.Module):\n\n def __init__(self, D, device=None) -> None:\n super().__init__()\n self.D = D\n # yapf: disable\n coords = torch.stack(torch.meshgrid([\n torch.linspace(-1.0, 1.0, self.D, device=device),\n torch.linspace(-1.0, 1.0, self.D, device=device)],\n indexing=\"ij\"), dim=-1).reshape(-1, 2)\n # yapf: enable\n self.register_buffer(\"coords\", coords)\n\n def transform(self, images: torch.Tensor, trans: torch.Tensor):\n \"\"\"\n The `images` are stored in `YX` mode, so the `trans` is also `YX`!\n\n Supposing that D is 96, a point is at 0.0:\n - adding 48 should move it to the right corner which is 1.0\n 1.0 = 0.0 + 48 / (96 / 2)\n - adding 96(>48) should leave it at 0.0\n 0.0 = 0.0 + 96 / (96 / 2) - 2.0\n - adding -96(<48) should leave it at 0.0\n 0.0 = 0.0 - 96 / (96 / 2) + 2.0\n\n Input:\n images: (B, NY, NX)\n trans: (B, T, 2)\n\n Returns:\n images: (B, T, NY, NX)\n \"\"\"\n B, NY, NX = images.shape\n assert self.D == NY == NX\n assert images.shape[0] == trans.shape[0]\n\n grid = einops.rearrange(self.coords, \"N C2 -> 1 1 N C2\") - \\\n einops.rearrange(trans, \"B T C2 -> B T 1 C2\") * 2 / self.D\n grid = grid.flip(-1) # convert the first axis from slow-axis to fast-axis\n grid[grid >= 1] -= 2\n grid[grid <= -1] += 2\n grid.clamp_(-1.0, 1.0)\n\n sampled = F.grid_sample(einops.rearrange(images, \"B NY NX -> B 1 NY NX\"), grid, align_corners=True)\n\n sampled = einops.rearrange(sampled, \"B 1 T (NY NX) -> B T NY NX\", NX=NX, NY=NY)\n return sampled" }, { "identifier": "FourierGridTranslate", "path": "cryostar/utils/transforms.py", "snippet": "class FourierGridTranslate(torch.nn.Module):\n \"\"\"\n DFT's translation is:\n `f(x - x0, y - y0) <=> F(u, v) exp(-2 j \\pi (x0 u + y0 v) / N )`\n where `x, y, u, v` all have a range of `N`, so `(x0 u + y0 v) / N \\in (0, N)`\n\n Here we initialize the `u, v` coordinates between `(-0.5, 0.5)` so that the \n range is 1, where the `1/N` term can be ignored.\n\n See also: https://dsp.stackexchange.com/questions/40228/translation-property-of-2-d-discrete-fourier-transform\n\n Important notes:\n If `N=4`, the coordinates u will be `[-0.5, -0.17, 0.17, 0.5]`, but the \n `fft`ed image's frequency is `[-0.50, -0.25, 0.00, 0.25]`, so we have to \n add some corrections:\n - right-shift `u` to be `[-0.50, -0.25, 0.00, 0.25]`\n - perform multiplication\n\n \"\"\"\n\n def __init__(self, D, device=None) -> None:\n super().__init__()\n self.D = D\n # yapf: disable\n coords = torch.stack(torch.meshgrid([\n torch.linspace(-1.0, 1.0, self.D, device=device),\n torch.linspace(-1.0, 1.0, self.D, device=device)],\n indexing=\"ij\"), dim=-1).reshape(-1, 2) / 2\n # yapf: enable\n coords = shift_coords(coords, 0.5, 0.5, None, self.D, self.D, None, False)\n self.register_buffer(\"coords\", coords)\n\n def transform(self, images: torch.Tensor, trans: torch.Tensor):\n \"\"\"\n The `images` are stored in `YX` mode, so the `trans` is also `YX`!\n\n Input:\n images: (B, NY, NX)\n trans: (B, T, 2)\n\n Returns:\n images: (B, T, NY, NX)\n \"\"\"\n B, NY, NX = images.shape\n assert self.D == NY == NX\n assert images.shape[0] == trans.shape[0]\n images = einops.rearrange(images, \"B NY NX -> B 1 (NY NX)\")\n delta = trans @ self.coords.t() * -2j * 
torch.pi\n images_trans = torch.exp(delta) * images\n images_trans = einops.rearrange(images_trans, \"B T (NY NX) -> B T NY NX\", NY=self.D, NX=self.D)\n return images_trans" }, { "identifier": "CTFRelion", "path": "cryostar/utils/ctf_utils.py", "snippet": "class CTFRelion(CTFBase):\n \"\"\"\n BUG: There are two bugs in this file:\n 1. `self.angleFrequency` has some error for even-sized grid.\n 2. `local_defocus` in `get_ctf()` has some error, `angleAstigmatism` should be\n replaced with `defocusU - defocusV`.\n\n The bugs will not affect real-world data too much. But you may encounter some issues\n on simulated datasets. Use CTFCryoDRGN instead.\n \"\"\"\n\n def __init__(self,\n size=257,\n resolution=0.8,\n kV=300.0,\n valueNyquist=1.,\n defocusU=1.,\n defocusV=1.,\n angleAstigmatism=0.,\n cs=2.7,\n phasePlate=0.,\n amplitudeContrast=.1,\n bFactor=0.,\n num_particles=500,\n requires_grad=False,\n precompute=False,\n flip_images=False):\n super(CTFRelion, self).__init__(resolution, num_particles, requires_grad)\n self.requires_grad = requires_grad\n self.flip_images = flip_images\n\n self.size = size # in pixel\n self.resolution = resolution # in angstrom\n self.kV = kV # in kilovolt\n\n self.valueNyquist = valueNyquist\n self.phasePlate = phasePlate / 180. * np.pi # in radians (converted from degrees)\n self.amplitudeContrast = amplitudeContrast\n self.bFactor = bFactor\n\n self.frequency = 1. / self.resolution\n\n self.wavelength = self._get_ewavelength(self.kV * 1e3) # input in V (so we convert kv*1e3)\n\n angleAstigmatism = angleAstigmatism / 180. * np.pi # input in degree converted in radian\n cs = cs * 1e7 # input in mm converted in angstrom\n # the angleAstigmatism, defocusU, defocusV and cs are nn.Parameter of size (N, 1, 1)\n self.angleAstigmatism = nn.Parameter(angleAstigmatism * torch.ones((num_particles, 1, 1), dtype=torch.float32),\n requires_grad=requires_grad)\n self.cs = nn.Parameter(cs * torch.ones((num_particles, 1, 1), dtype=torch.float32), requires_grad=requires_grad)\n self.defocusU = nn.Parameter(defocusU * torch.ones((num_particles, 1, 1), dtype=torch.float32),\n requires_grad=requires_grad)\n self.defocusV = nn.Parameter(defocusV * torch.ones((num_particles, 1, 1), dtype=torch.float32),\n requires_grad=requires_grad)\n\n self.precomputed_filters = precompute\n\n ax = torch.linspace(-1. / (2. * resolution), 1 / (2. * resolution), self.size)\n mx, my = torch.meshgrid(ax, ax, indexing=\"ij\")\n self.register_buffer(\"r2\", mx**2 + my**2)\n self.register_buffer(\"r\", torch.sqrt(self.r2))\n self.register_buffer(\"angleFrequency\", torch.atan2(my, mx))\n\n if not self.requires_grad and self.precomputed_filters:\n print(\"Precomputing hFourier in CTF\")\n self.register_buffer('hFourier', self.get_ctf(torch.arange(num_particles), num_particles))\n\n def _get_ewavelength(self, U):\n # assumes V as input, returns wavelength in angstrom\n h = scipy.constants.h\n e = scipy.constants.e\n c = scipy.constants.c\n m0 = scipy.constants.m_e\n\n return h / math.sqrt(2. * m0 * e * U) / math.sqrt(1 + e * U / (2 * m0 * c**2)) * 1e10\n\n def get_ctf(self, idcs, B, cpu_params={}, frequency_marcher=None):\n defocusU = self.defocusU[idcs, :, :]\n defocusV = self.defocusV[idcs, :, :]\n angleAstigmatism = self.angleAstigmatism[idcs, :, :]\n cs = self.cs[idcs, :, :]\n\n ac = self.amplitudeContrast\n pc = math.sqrt(1. - ac**2)\n K1 = np.pi / 2. 
* cs * self.wavelength**3\n K2 = np.pi * self.wavelength\n\n # Cut-off from frequency marcher\n if frequency_marcher is not None:\n self.size_after_fm = 2 * frequency_marcher.f + 1\n if self.size_after_fm > self.size:\n self.size_after_fm = self.size\n angleFrequency = frequency_marcher.cut_coords_plane(self.angleFrequency.reshape(\n self.size, self.size, 1)).reshape(self.size_after_fm, self.size_after_fm)\n r2 = frequency_marcher.cut_coords_plane(self.r2.reshape(self.size, self.size,\n 1)).reshape(self.size_after_fm, self.size_after_fm)\n else:\n self.size_after_fm = self.size\n angleFrequency = self.angleFrequency\n r2 = self.r2\n\n angle = angleFrequency - angleAstigmatism\n local_defocus = 1e4 * (defocusU + defocusV) / 2. + angleAstigmatism * torch.cos(2. * angle)\n\n gamma = K1 * r2**2 - K2 * r2 * local_defocus - self.phasePlate\n hFourier = -pc * torch.sin(gamma) + ac * torch.cos(gamma)\n\n if self.valueNyquist != 1:\n decay = np.sqrt(-np.log(self.valueNyquist)) * 2. * self.resolution\n envelope = torch.exp(-self.frequency * decay**2 * r2)\n hFourier *= envelope\n\n return hFourier\n\n def oversample_multiply_crop(self, x_fourier, hFourier):\n # we assume that the shape of the CTF is always going to be bigger\n # than the size of the input image\n input_sz = x_fourier.shape[-1]\n if input_sz != self.size_after_fm:\n x_primal = fourier_to_primal_2d(x_fourier)\n\n pad_len = (self.size_after_fm - x_fourier.shape[-1]) // 2 # here we assume even lengths\n p2d = (pad_len, pad_len, pad_len, pad_len)\n x_primal_padded = F.pad(x_primal, p2d, 'constant', 0)\n\n x_fourier_padded = primal_to_fourier_2d(x_primal_padded)\n\n x_fourier_padded_filtered = x_fourier_padded * hFourier[:, None, :, :]\n return x_fourier_padded_filtered[..., pad_len:-pad_len, pad_len:-pad_len]\n else:\n return x_fourier * hFourier[:, None, :, :]\n\n def get_cpu_params(self, idcs, ctf_params, flip=False):\n batch_size = idcs.shape[0]\n self.defocusU[idcs, :, :] = ctf_params['defocusU'][:batch_size] if not flip else\\\n ctf_params['defocusU'][batch_size:]\n self.defocusV[idcs, :, :] = ctf_params['defocusV'][:batch_size] if not flip else\\\n ctf_params['defocusV'][batch_size:]\n self.angleAstigmatism[idcs, :, :] = ctf_params['angleAstigmatism'][:batch_size] if not flip else\\\n ctf_params['angleAstigmatism'][batch_size:]\n cpu_params = {}\n return cpu_params\n\n def forward(self, x_fourier, idcs=0, ctf_params={}, mode='gt', frequency_marcher=None):\n # This is when we want to prescribe parameters for the CTF\n if x_fourier.dim() == 3:\n x_fourier = x_fourier[None, ...]\n # x_fourier: B, 1, S, S\n batch_size = len(idcs)\n cpu_params = {}\n if ctf_params:\n cpu_params = self.get_cpu_params(idcs, ctf_params, flip=False)\n\n # if new params for the CTF have been prescribed or we are optimizing it\n # then request the evaluation of the CTF\n if not ctf_params and self.precomputed_filters and not self.requires_grad:\n hFourier = self.hFourier[idcs, :, :]\n else:\n hFourier = self.get_ctf(idcs, batch_size, cpu_params=cpu_params, frequency_marcher=frequency_marcher)\n\n if self.flip_images:\n flipped_hFourier = torch.flip(hFourier, [1, 2])\n\n hFourier = torch.cat([hFourier, flipped_hFourier], dim=0)\n\n return self.oversample_multiply_crop(x_fourier, hFourier)" }, { "identifier": "CTFCryoDRGN", "path": "cryostar/utils/ctf_utils.py", "snippet": "class CTFCryoDRGN(CTFBase):\n\n def __init__(self,\n size,\n resolution,\n num_particles=None,\n kV=300,\n cs=2.0,\n amplitudeContrast=0.1,\n requires_grad=False):\n super(CTFBase, 
self).__init__()\n self.size = size\n self.resolution = resolution\n self.requires_grad = requires_grad\n self.kV = kV\n self.cs = cs\n self.ac = amplitudeContrast\n # ax = torch.linspace(-1. / (2. * resolution), 1 / (2. * resolution), self.size)\n # mx, my = torch.meshgrid(ax, ax, indexing=\"ij\")\n ax = torch.fft.fftshift(torch.fft.fftfreq(self.size, self.resolution))\n mx, my = torch.meshgrid(ax, ax, indexing=\"xy\")\n freqs = torch.stack([mx.flatten(), my.flatten()], 1)\n self.register_buffer(\"freqs\", freqs)\n\n def get_ctf(self, ctf_params={}):\n bsz = len(ctf_params[\"defocusU\"])\n device = self.freqs.device\n hFourier = compute_ctf(freqs=self.freqs.repeat(bsz, 1, 1),\n dfu=(ctf_params[\"defocusU\"] * 1e4).squeeze(1),\n dfv=(ctf_params[\"defocusV\"] * 1e4).squeeze(1),\n dfang=torch.rad2deg(ctf_params[\"angleAstigmatism\"]).squeeze(1),\n volt=torch.tensor(self.kV, device=device).repeat(bsz, 1),\n cs=torch.tensor(self.cs, device=device).repeat(bsz, 1),\n w=torch.tensor(self.ac, device=device).repeat(bsz,\n 1)).reshape(bsz, self.size, self.size)\n return hFourier\n\n def forward(self, x_fourier, idcs=0, ctf_params={}, mode='gt', frequency_marcher=None):\n hFourier = -self.get_ctf(ctf_params)\n return x_fourier * hFourier[:, None, :, :]" }, { "identifier": "fourier_to_primal_2d", "path": "cryostar/utils/fft_utils.py", "snippet": "def fourier_to_primal_2d(f: torch.Tensor) -> torch.Tensor:\n f = torch.fft.ifftshift(f, dim=(-2, -1))\n return torch.fft.fftshift(torch.fft.ifftn(f, s=(f.shape[-2], f.shape[-1]), dim=(-2, -1)), dim=(-2, -1))" }, { "identifier": "primal_to_fourier_2d", "path": "cryostar/utils/fft_utils.py", "snippet": "@torch.autocast(\"cuda\")\ndef primal_to_fourier_2d(r: torch.Tensor) -> torch.Tensor:\n with torch.autocast(\"cuda\", enabled=False):\n r = torch.fft.ifftshift(r.float(), dim=(-2, -1))\n f = torch.fft.fftshift(torch.fft.fftn(r, s=(r.shape[-2], r.shape[-1]), dim=(-2, -1)), dim=(-2, -1))\n return f" }, { "identifier": "sample_along_pca", "path": "cryostar/utils/latent_space_utils.py", "snippet": "def sample_along_pca(z: np.ndarray, pca_dim=1, num=5) -> np.ndarray:\n assert isinstance(z, np.ndarray)\n pc, pca = run_pca(z)\n start = np.percentile(pc[:, pca_dim - 1], 5)\n stop = np.percentile(pc[:, pca_dim - 1], 95)\n z_pc_traj = get_pc_traj(pca, z.shape[1], num, pca_dim, start, stop)\n point, point_id = get_nearest_point(z, z_pc_traj)\n return point, point_id" }, { "identifier": "get_nearest_point", "path": "cryostar/utils/latent_space_utils.py", "snippet": "def get_nearest_point(data: np.ndarray, query: np.ndarray) -> Tuple[npt.NDArray[np.float32], np.ndarray]:\n \"\"\"\n Find closest point in @data to @query\n Return datapoint, index\n \"\"\"\n ind = cdist(query, data).argmin(axis=1)\n return data[ind], ind" }, { "identifier": "cluster_kmeans", "path": "cryostar/utils/latent_space_utils.py", "snippet": "def cluster_kmeans(z: np.ndarray, K: int, on_data: bool = True, reorder: bool = True) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Cluster z by K means clustering\n Returns cluster labels, cluster centers\n If reorder=True, reorders clusters according to agglomerative clustering of cluster centers\n \"\"\"\n kmeans = KMeans(n_clusters=K, n_init=10, random_state=0, max_iter=10)\n labels = kmeans.fit_predict(z)\n centers = kmeans.cluster_centers_\n\n centers_ind = None\n if on_data:\n centers, centers_ind = get_nearest_point(z, centers)\n\n if reorder:\n # BUG from seaborn or scipy:\n # sns.clustermap only supports data with at least 2 dim\n if z.shape[1] == 1:\n 
centers = np.hstack([centers, np.zeros_like(centers)])\n g = sns.clustermap(centers)\n reordered = g.dendrogram_row.reordered_ind\n centers = centers[reordered]\n if centers_ind is not None:\n centers_ind = centers_ind[reordered]\n tmp = {k: i for i, k in enumerate(reordered)}\n labels = np.array([tmp[k] for k in labels])\n if z.shape[1] == 1:\n centers = centers[:, :1]\n return labels, centers" }, { "identifier": "pl_init_exp", "path": "cryostar/utils/misc.py", "snippet": "def set_seed(seed: int = 42):\ndef chain(arg, *funcs):\ndef convert_to_numpy(*args):\ndef CHECK_SHAPE(tensor, expected_shape):\ndef ASSERT_SHAPE(tensor, expected_shape):\ndef parse_mmengine_args(override_mode=\"default\"):\ndef flatten_nested_dict(nested: Union[dict, Config]) -> dict:\ndef warmup(warmup_step, lower=0.0, upper=1.0):\n def run(cur_step):\ndef init_mmengine_config(args):\ndef init_mmengine_exp(args,\n exp_prefix='',\n backup_list=None,\n inplace=True,\n work_dir_name=\"work_dirs\",\n project_name=\"cryostar\",\n tensorboard=False):\ndef _get_next_version(root_dir, dir_name_prefix):\ndef pl_init_exp(override_mode=\"default\",\n exp_prefix='',\n backup_list=None,\n inplace=False,\n work_dir_name=\"work_dirs\",\n project_name=\"cryostar\"):\ndef save_pdb(CAs, path, ref_pdb_path):\ndef load_CAs_from_pdb(file):\ndef load_NCaC_from_pdb(file):\ndef load_chain_A(pdb_path):\ndef points_to_pdb(path_to_save, points: np.ndarray):\ndef point_stack_to_pdb(path_to_save, point_stack: np.ndarray):\ndef find_rigid_alignment(A, B):\ndef batch_find_rigid_alignment(A, B):\ndef pretty_dict(x, precision=3):\ndef create_sphere_mask(d, h, w, center=None, radius=None) -> np.ndarray:\ndef create_circular_mask(h, w, center=None, radius=None) -> np.ndarray:\n H = A_c.T.mm(B_c)\n U, S, V = torch.svd(H)\n R = V.mm(U.T)\n H = einops.einsum(A_c, B_c, \"b n c1, b n c2 -> b c1 c2\")\n V = VmT.mT\n R = einops.einsum(V, U.transpose(2, 1), \"b c1 c2, b c2 c3 -> b c1 c3\")" }, { "identifier": "calc_kl_loss", "path": "cryostar/utils/losses.py", "snippet": "def calc_kl_loss(mu, log_var, free_bits, reduction=\"mean\"):\n kld_loss = -0.5 * (1 + log_var - mu.pow(2) - log_var.exp())\n # free bits\n kld_loss = torch.clamp(kld_loss, free_bits) # (bsz, z-dim)\n kld_loss = torch.mean(kld_loss, dim=1) # (bsz, )\n if reduction == \"mean\":\n kld_loss = torch.mean(kld_loss) # averaged over bsz x z-dim\n elif reduction == \"none\":\n kld_loss = kld_loss\n else:\n raise NotImplementedError\n return kld_loss" }, { "identifier": "VAEEncoder", "path": "cryostar/utils/ml_modules.py", "snippet": "class VAEEncoder(nn.Module):\n\n def __init__(self, in_dim: int, hidden_dim: Union[int, List[int]], out_dim: int, num_hidden_layers=3):\n super().__init__()\n self.in_dim = in_dim\n if isinstance(hidden_dim, int):\n self.hidden_dim = (hidden_dim, ) * num_hidden_layers\n elif isinstance(hidden_dim, (list, tuple)):\n assert len(hidden_dim) == num_hidden_layers\n self.hidden_dim = hidden_dim\n else:\n raise NotImplementedError\n self.out_dim = out_dim\n self.num_hidden_layers = num_hidden_layers\n\n self.input_layer = nn.Sequential(\n ResLinear(in_dim, self.hidden_dim[0]) if in_dim == self.hidden_dim[0] else Linear(\n in_dim, self.hidden_dim[0]), nn.ReLU(inplace=True))\n self.mlp = MLP(self.hidden_dim[:-1], self.hidden_dim[1:])\n\n self.mean_layer = Linear(self.hidden_dim[-1], out_dim)\n self.var_layer = Linear(self.hidden_dim[-1], out_dim)\n\n def forward(self, x):\n x = self.mlp(self.input_layer(x))\n mean = self.mean_layer(x)\n log_var = self.var_layer(x)\n return mean, 
log_var" }, { "identifier": "reparameterize", "path": "cryostar/utils/ml_modules.py", "snippet": "def reparameterize(mu, log_var):\n std = torch.exp(0.5 * log_var)\n eps = torch.randn_like(std)\n return mu + eps * std" }, { "identifier": "save_mrc", "path": "cryostar/utils/mrc_tools.py", "snippet": "def save_mrc(vol,\n path,\n voxel_size: Union[int, float, Tuple, np.recarray] = None,\n origin: Union[int, float, Tuple, np.recarray] = None):\n \"\"\"\n Save volumetric data to mrc file, set voxel_size, origin.\n See Also: https://mrcfile.readthedocs.io/en/stable/source/mrcfile.html#mrcfile.mrcobject.MrcObject.voxel_size\n Args:\n vol: density volume\n path: save path\n voxel_size: a single number, a 3-tuple (x, y ,z) or a modified version of the voxel_size array, default 1.\n origin: a single number, a 3-tuple (x, y ,z) or a modified version of the origin array, default 0.\n\n \"\"\"\n with mrcfile.new(path, overwrite=True) as m:\n m.set_data(vol)\n\n if voxel_size is not None:\n m.voxel_size = voxel_size\n\n if origin is not None:\n m.header.origin = origin" } ]
import_statement:
import os
import os.path as osp
import einops
import lightning.pytorch as pl
import numpy as np
import torch
from lightning.pytorch.strategies import DDPStrategy
from lightning.pytorch.utilities import rank_zero_only
from torch.utils.data import DataLoader
from tqdm import tqdm
from mmengine import mkdir_or_exist
from cryostar.utils.dataio import StarfileDataSet, StarfileDatasetConfig
from cryostar.nerf.volume_utils import ImplicitFourierVolume
from cryostar.utils.transforms import SpatialGridTranslate, FourierGridTranslate
from cryostar.utils.ctf_utils import CTFRelion, CTFCryoDRGN
from cryostar.utils.fft_utils import (fourier_to_primal_2d, primal_to_fourier_2d)
from cryostar.utils.latent_space_utils import sample_along_pca, get_nearest_point, cluster_kmeans
from cryostar.utils.misc import (pl_init_exp, create_circular_mask, log_to_current, pretty_dict)
from cryostar.utils.losses import calc_kl_loss
from cryostar.utils.ml_modules import VAEEncoder, reparameterize
from cryostar.utils.mrc_tools import save_mrc
from miscs import infer_ctf_params_from_config
token_num: 11,009
log_to_current = rank_zero_only(log_to_current) TASK_NAME = "density" class CryoModel(pl.LightningModule): def __init__(self, cfg, dataset): super().__init__() self.cfg = cfg self.dataset = dataset self.z_dim = cfg.model.z_dim self.history_saved_dirs = [] if cfg.extra_input_data_attr.given_z is None and self.z_dim != 0: if cfg.model.enc_space == "real": self.encoder = VAEEncoder(self.cfg.data_process.down_side_shape**2, cfg.model.hidden, self.z_dim, num_hidden_layers=4) elif cfg.model.enc_space == "fourier": self.encoder = VAEEncoder(2 * self.cfg.data_process.down_side_shape**2, cfg.model.hidden, self.z_dim, num_hidden_layers=4) else: raise NotImplementedError if cfg.model.shift_method == "interp": self.translate = SpatialGridTranslate(self.cfg.data_process.down_side_shape, ) log_to_current("We will deprecate `model.shift_method=interp` in a future version, use `model.shift_method=fft` instead.") elif cfg.model.shift_method == "fft": self.f_translate = FourierGridTranslate(self.cfg.data_process.down_side_shape, ) else: raise NotImplementedError ctf_params = infer_ctf_params_from_config(cfg) if cfg.model.ctf == "v1": self.ctf = CTFRelion(**ctf_params, num_particles=len(dataset)) log_to_current("We will deprecate `model.ctf=v1` in a future version, use `model.ctf=v2` instead.") elif cfg.model.ctf == "v2": self.ctf = CTFCryoDRGN(**ctf_params, num_particles=len(dataset)) else: raise NotImplementedError log_to_current(ctf_params) self.vol = ImplicitFourierVolume( self.z_dim, self.cfg.data_process.down_side_shape, self.cfg.loss.mask_rad_for_image_loss, { "net_type": cfg.model.net_type, "pe_dim": self.cfg.data_process.down_side_shape, "D": self.cfg.data_process.down_side_shape, "pe_type": cfg.model.pe_type, "force_symmetry": False, "hidden": cfg.model.hidden, }) mask = create_circular_mask(self.cfg.data_process.down_side_shape, self.cfg.data_process.down_side_shape, None, self.cfg.data_process.down_side_shape // 2 * self.cfg.loss.mask_rad_for_image_loss,) self.register_buffer("mask", torch.from_numpy(mask)) if cfg.extra_input_data_attr.given_z is not None: self.register_buffer("given_z", torch.from_numpy(np.load(cfg.extra_input_data_attr.given_z))) if getattr(self.cfg.extra_input_data_attr, "ckpt_path", None) is not None: log_to_current(f"load checkpoint from {self.cfg.extra_input_data_attr.ckpt_path}") state_dict = torch.load(self.cfg.extra_input_data_attr.ckpt_path, map_location=self.device) self.vol.load_state_dict(state_dict) def _get_save_dir(self): save_dir = os.path.join(self.cfg.work_dir, f"{self.current_epoch:04d}_{self.global_step:07d}") mkdir_or_exist(save_dir) return save_dir def process_image(self, batch): R = batch["rotmat"] bsz = len(R) trans = torch.cat([ batch["shiftY"].float().reshape(bsz, 1, 1) / self.cfg.data_process.down_apix, batch["shiftX"].float().reshape(bsz, 1, 1) / self.cfg.data_process.down_apix ], dim=2) proj_in = batch["proj"].to(self.device) if self.cfg.model.shift_method == "interp": proj = self.translate.transform(proj_in.squeeze(1), trans.to(self.device)) elif self.cfg.model.shift_method == "fft": fproj = primal_to_fourier_2d(proj_in) fproj = self.f_translate.transform(fproj.squeeze(1), trans.to(self.device)) proj = fourier_to_primal_2d(fproj) if self.cfg.model.shift_data: return proj, proj else: return proj_in, proj def training_step(self, batch, batch_idx): R = batch["rotmat"] bsz = len(R) proj_in, proj_out = self.process_image(batch) f_proj_in = primal_to_fourier_2d(proj_in) if self.z_dim != 0: if self.cfg.extra_input_data_attr.given_z is not None: z = 
self.given_z[batch["idx"]].reshape(bsz, -1) kld_loss = 0.0 else: if self.cfg.model.enc_space == "fourier": enc_input = einops.rearrange(torch.view_as_real(f_proj_in), "b 1 ny nx c2 -> b (1 ny nx c2)", c2=2) elif self.cfg.model.enc_space == "real": enc_input = einops.rearrange(proj_in, "b 1 ny nx -> b (1 ny nx)") mu, log_var = self.encoder(enc_input) z = reparameterize(mu, log_var)
log_to_current = rank_zero_only(log_to_current) TASK_NAME = "density" class CryoModel(pl.LightningModule): def __init__(self, cfg, dataset): super().__init__() self.cfg = cfg self.dataset = dataset self.z_dim = cfg.model.z_dim self.history_saved_dirs = [] if cfg.extra_input_data_attr.given_z is None and self.z_dim != 0: if cfg.model.enc_space == "real": self.encoder = VAEEncoder(self.cfg.data_process.down_side_shape**2, cfg.model.hidden, self.z_dim, num_hidden_layers=4) elif cfg.model.enc_space == "fourier": self.encoder = VAEEncoder(2 * self.cfg.data_process.down_side_shape**2, cfg.model.hidden, self.z_dim, num_hidden_layers=4) else: raise NotImplementedError if cfg.model.shift_method == "interp": self.translate = SpatialGridTranslate(self.cfg.data_process.down_side_shape, ) log_to_current("We will deprecate `model.shift_method=interp` in a future version, use `model.shift_method=fft` instead.") elif cfg.model.shift_method == "fft": self.f_translate = FourierGridTranslate(self.cfg.data_process.down_side_shape, ) else: raise NotImplementedError ctf_params = infer_ctf_params_from_config(cfg) if cfg.model.ctf == "v1": self.ctf = CTFRelion(**ctf_params, num_particles=len(dataset)) log_to_current("We will deprecate `model.ctf=v1` in a future version, use `model.ctf=v2` instead.") elif cfg.model.ctf == "v2": self.ctf = CTFCryoDRGN(**ctf_params, num_particles=len(dataset)) else: raise NotImplementedError log_to_current(ctf_params) self.vol = ImplicitFourierVolume( self.z_dim, self.cfg.data_process.down_side_shape, self.cfg.loss.mask_rad_for_image_loss, { "net_type": cfg.model.net_type, "pe_dim": self.cfg.data_process.down_side_shape, "D": self.cfg.data_process.down_side_shape, "pe_type": cfg.model.pe_type, "force_symmetry": False, "hidden": cfg.model.hidden, }) mask = create_circular_mask(self.cfg.data_process.down_side_shape, self.cfg.data_process.down_side_shape, None, self.cfg.data_process.down_side_shape // 2 * self.cfg.loss.mask_rad_for_image_loss,) self.register_buffer("mask", torch.from_numpy(mask)) if cfg.extra_input_data_attr.given_z is not None: self.register_buffer("given_z", torch.from_numpy(np.load(cfg.extra_input_data_attr.given_z))) if getattr(self.cfg.extra_input_data_attr, "ckpt_path", None) is not None: log_to_current(f"load checkpoint from {self.cfg.extra_input_data_attr.ckpt_path}") state_dict = torch.load(self.cfg.extra_input_data_attr.ckpt_path, map_location=self.device) self.vol.load_state_dict(state_dict) def _get_save_dir(self): save_dir = os.path.join(self.cfg.work_dir, f"{self.current_epoch:04d}_{self.global_step:07d}") mkdir_or_exist(save_dir) return save_dir def process_image(self, batch): R = batch["rotmat"] bsz = len(R) trans = torch.cat([ batch["shiftY"].float().reshape(bsz, 1, 1) / self.cfg.data_process.down_apix, batch["shiftX"].float().reshape(bsz, 1, 1) / self.cfg.data_process.down_apix ], dim=2) proj_in = batch["proj"].to(self.device) if self.cfg.model.shift_method == "interp": proj = self.translate.transform(proj_in.squeeze(1), trans.to(self.device)) elif self.cfg.model.shift_method == "fft": fproj = primal_to_fourier_2d(proj_in) fproj = self.f_translate.transform(fproj.squeeze(1), trans.to(self.device)) proj = fourier_to_primal_2d(fproj) if self.cfg.model.shift_data: return proj, proj else: return proj_in, proj def training_step(self, batch, batch_idx): R = batch["rotmat"] bsz = len(R) proj_in, proj_out = self.process_image(batch) f_proj_in = primal_to_fourier_2d(proj_in) if self.z_dim != 0: if self.cfg.extra_input_data_attr.given_z is not None: z = 
self.given_z[batch["idx"]].reshape(bsz, -1) kld_loss = 0.0 else: if self.cfg.model.enc_space == "fourier": enc_input = einops.rearrange(torch.view_as_real(f_proj_in), "b 1 ny nx c2 -> b (1 ny nx c2)", c2=2) elif self.cfg.model.enc_space == "real": enc_input = einops.rearrange(proj_in, "b 1 ny nx -> b (1 ny nx)") mu, log_var = self.encoder(enc_input) z = reparameterize(mu, log_var)
next_line: kld_loss = calc_kl_loss(mu, log_var, self.cfg.loss.free_bits)
gold_snippet_index: 13
created_at: 2023-11-06 07:15:26+00:00
level: 16k
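With the cryostar row complete, here is a small hedged sketch of how its fields fit together: `gold_snippet_index` points at the entry in `context` whose `identifier` is the symbol used by `next_line` (index 13 is `calc_kl_loss` in the row above). The helper and the toy row below are made up for illustration; the toy row shortens `context` to a single entry, so its index is 0 rather than 13.

```python
def gold_snippet_matches(row: dict) -> bool:
    """Check that the gold context snippet's identifier appears in next_line."""
    gold = row["context"][row["gold_snippet_index"]]
    return gold["identifier"] in row["next_line"]


# Toy row echoing values from the cryostar example above (context truncated).
toy_row = {
    "repo_name": "bytedance/cryostar",
    "file_path": "projects/star/train_density.py",
    "context": [
        {"identifier": "calc_kl_loss", "path": "cryostar/utils/losses.py", "snippet": "..."},
    ],
    "next_line": "kld_loss = calc_kl_loss(mu, log_var, self.cfg.loss.free_bits)",
    "gold_snippet_index": 0,
}
assert gold_snippet_matches(toy_row)
```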
repo_name: UMass-Foundation-Model/CoVLM
file_path: transformers/src/transformers/models/unispeech/configuration_unispeech.py
[ { "identifier": "PretrainedConfig", "path": "transformers/src/transformers/configuration_utils.py", "snippet": "class PretrainedConfig(PushToHubMixin):\n r\"\"\"\n Base class for all configuration classes. Handles a few parameters common to all models' configurations as well as\n methods for loading/downloading/saving configurations.\n\n <Tip>\n\n A configuration file can be loaded and saved to disk. Loading the configuration file and using this file to\n initialize a model does **not** load the model weights. It only affects the model's configuration.\n\n </Tip>\n\n Class attributes (overridden by derived classes):\n\n - **model_type** (`str`) -- An identifier for the model type, serialized into the JSON file, and used to recreate\n the correct object in [`~transformers.AutoConfig`].\n - **is_composition** (`bool`) -- Whether the config class is composed of multiple sub-configs. In this case the\n config has to be initialized from two or more configs of type [`~transformers.PretrainedConfig`] like:\n [`~transformers.EncoderDecoderConfig`] or [`~RagConfig`].\n - **keys_to_ignore_at_inference** (`List[str]`) -- A list of keys to ignore by default when looking at dictionary\n outputs of the model during inference.\n - **attribute_map** (`Dict[str, str]`) -- A dict that maps model specific attribute names to the standardized\n naming of attributes.\n\n Common attributes (present in all subclasses):\n\n - **vocab_size** (`int`) -- The number of tokens in the vocabulary, which is also the first dimension of the\n embeddings matrix (this attribute may be missing for models that don't have a text modality like ViT).\n - **hidden_size** (`int`) -- The hidden size of the model.\n - **num_attention_heads** (`int`) -- The number of attention heads used in the multi-head attention layers of the\n model.\n - **num_hidden_layers** (`int`) -- The number of blocks in the model.\n\n Arg:\n name_or_path (`str`, *optional*, defaults to `\"\"`):\n Store the string that was passed to [`PreTrainedModel.from_pretrained`] or\n [`TFPreTrainedModel.from_pretrained`] as `pretrained_model_name_or_path` if the configuration was created\n with such a method.\n output_hidden_states (`bool`, *optional*, defaults to `False`):\n Whether or not the model should return all hidden-states.\n output_attentions (`bool`, *optional*, defaults to `False`):\n Whether or not the model should returns all attentions.\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return a [`~transformers.utils.ModelOutput`] instead of a plain tuple.\n is_encoder_decoder (`bool`, *optional*, defaults to `False`):\n Whether the model is used as an encoder/decoder or not.\n is_decoder (`bool`, *optional*, defaults to `False`):\n Whether the model is used as decoder or not (in which case it's used as an encoder).\n cross_attention_hidden_size** (`bool`, *optional*):\n The hidden size of the cross-attention layer in case the model is used as a decoder in an encoder-decoder\n setting and the cross-attention hidden dimension differs from `self.config.hidden_size`.\n add_cross_attention (`bool`, *optional*, defaults to `False`):\n Whether cross-attention layers should be added to the model. 
Note, this option is only relevant for models\n that can be used as decoder models within the [`EncoderDecoderModel`] class, which consists of all models\n in `AUTO_MODELS_FOR_CAUSAL_LM`.\n tie_encoder_decoder (`bool`, *optional*, defaults to `False`):\n Whether all encoder weights should be tied to their equivalent decoder weights. This requires the encoder\n and decoder model to have the exact same parameter names.\n prune_heads (`Dict[int, List[int]]`, *optional*, defaults to `{}`):\n Pruned heads of the model. The keys are the selected layer indices and the associated values, the list of\n heads to prune in said layer.\n\n For instance `{1: [0, 2], 2: [2, 3]}` will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2.\n chunk_size_feed_forward (`int`, *optional*, defaults to `0`):\n The chunk size of all feed forward layers in the residual attention blocks. A chunk size of `0` means that\n the feed forward layer is not chunked. A chunk size of n means that the feed forward layer processes `n` <\n sequence_length embeddings at a time. For more information on feed forward chunking, see [How does Feed\n Forward Chunking work?](../glossary.html#feed-forward-chunking).\n\n > Parameters for sequence generation\n\n max_length (`int`, *optional*, defaults to 20):\n Maximum length that will be used by default in the `generate` method of the model.\n min_length (`int`, *optional*, defaults to 0):\n Minimum length that will be used by default in the `generate` method of the model.\n do_sample (`bool`, *optional*, defaults to `False`):\n Flag that will be used by default in the `generate` method of the model. Whether or not to use sampling ;\n use greedy decoding otherwise.\n early_stopping (`bool`, *optional*, defaults to `False`):\n Flag that will be used by default in the `generate` method of the model. Whether to stop the beam search\n when at least `num_beams` sentences are finished per batch or not.\n num_beams (`int`, *optional*, defaults to 1):\n Number of beams for beam search that will be used by default in the `generate` method of the model. 1 means\n no beam search.\n num_beam_groups (`int`, *optional*, defaults to 1):\n Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams\n that will be used by default in the `generate` method of the model. 1 means no group beam search.\n diversity_penalty (`float`, *optional*, defaults to 0.0):\n Value to control diversity for group beam search. that will be used by default in the `generate` method of\n the model. 0 means no diversity penalty. The higher the penalty, the more diverse are the outputs.\n temperature (`float`, *optional*, defaults to 1.0):\n The value used to module the next token probabilities that will be used by default in the `generate` method\n of the model. Must be strictly positive.\n top_k (`int`, *optional*, defaults to 50):\n Number of highest probability vocabulary tokens to keep for top-k-filtering that will be used by default in\n the `generate` method of the model.\n top_p (`float`, *optional*, defaults to 1):\n Value that will be used by default in the `generate` method of the model for `top_p`. 
If set to float < 1,\n only the most probable tokens with probabilities that add up to `top_p` or higher are kept for generation.\n typical_p (`float`, *optional*, defaults to 1):\n Local typicality measures how similar the conditional probability of predicting a target token next is to\n the expected conditional probability of predicting a random token next, given the partial text already\n generated. If set to float < 1, the smallest set of the most locally typical tokens with probabilities that\n add up to `typical_p` or higher are kept for generation. See [this\n paper](https://arxiv.org/pdf/2202.00666.pdf) for more details.\n repetition_penalty (`float`, *optional*, defaults to 1):\n Parameter for repetition penalty that will be used by default in the `generate` method of the model. 1.0\n means no penalty.\n length_penalty (`float`, *optional*, defaults to 1):\n Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to\n the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log\n likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while\n `length_penalty` < 0.0 encourages shorter sequences.\n no_repeat_ngram_size (`int`, *optional*, defaults to 0) -- Value that will be used by default in the\n `generate` method of the model for `no_repeat_ngram_size`. If set to int > 0, all ngrams of that size can\n only occur once.\n encoder_no_repeat_ngram_size (`int`, *optional*, defaults to 0) -- Value that will be used by\n default in the `generate` method of the model for `encoder_no_repeat_ngram_size`. If set to int > 0, all\n ngrams of that size that occur in the `encoder_input_ids` cannot occur in the `decoder_input_ids`.\n bad_words_ids (`List[int]`, *optional*):\n List of token ids that are not allowed to be generated that will be used by default in the `generate`\n method of the model. In order to get the tokens of the words that should not appear in the generated text,\n use `tokenizer.encode(bad_word, add_prefix_space=True)`.\n num_return_sequences (`int`, *optional*, defaults to 1):\n Number of independently computed returned sequences for each element in the batch that will be used by\n default in the `generate` method of the model.\n output_scores (`bool`, *optional*, defaults to `False`):\n Whether the model should return the logits when used for generation.\n return_dict_in_generate (`bool`, *optional*, defaults to `False`):\n Whether the model should return a [`~transformers.utils.ModelOutput`] instead of a `torch.LongTensor`.\n forced_bos_token_id (`int`, *optional*):\n The id of the token to force as the first generated token after the `decoder_start_token_id`. Useful for\n multilingual models like [mBART](../model_doc/mbart) where the first generated token needs to be the target\n language token.\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached.\n remove_invalid_values (`bool`, *optional*):\n Whether to remove possible _nan_ and _inf_ outputs of the model to prevent the generation method to crash.\n Note that using `remove_invalid_values` can slow down generation.\n\n > Parameters for fine-tuning tasks\n\n architectures (`List[str]`, *optional*):\n Model architectures that can be used with the model pretrained weights.\n finetuning_task (`str`, *optional*):\n Name of the task used to fine-tune the model. 
This can be used when converting from an original (TensorFlow\n or PyTorch) checkpoint.\n id2label (`Dict[int, str]`, *optional*):\n A map from index (for instance prediction index, or target index) to label.\n label2id (`Dict[str, int]`, *optional*): A map from label to index for the model.\n num_labels (`int`, *optional*):\n Number of labels to use in the last layer added to the model, typically for a classification task.\n task_specific_params (`Dict[str, Any]`, *optional*):\n Additional keyword arguments to store for the current task.\n problem_type (`str`, *optional*):\n Problem type for `XxxForSequenceClassification` models. Can be one of `\"regression\"`,\n `\"single_label_classification\"` or `\"multi_label_classification\"`.\n\n > Parameters linked to the tokenizer\n\n tokenizer_class (`str`, *optional*):\n The name of the associated tokenizer class to use (if none is set, will use the tokenizer associated to the\n model by default).\n prefix (`str`, *optional*):\n A specific prompt that should be added at the beginning of each text before calling the model.\n bos_token_id (`int`, *optional*): The id of the _beginning-of-stream_ token.\n pad_token_id (`int`, *optional*): The id of the _padding_ token.\n eos_token_id (`int`, *optional*): The id of the _end-of-stream_ token.\n decoder_start_token_id (`int`, *optional*):\n If an encoder-decoder model starts decoding with a different token than _bos_, the id of that token.\n sep_token_id (`int`, *optional*): The id of the _separation_ token.\n\n > PyTorch specific parameters\n\n torchscript (`bool`, *optional*, defaults to `False`):\n Whether or not the model should be used with Torchscript.\n tie_word_embeddings (`bool`, *optional*, defaults to `True`):\n Whether the model's input and output word embeddings should be tied. Note that this is only relevant if the\n model has a output word embedding layer.\n torch_dtype (`str`, *optional*):\n The `dtype` of the weights. This attribute can be used to initialize the model to a non-default `dtype`\n (which is normally `float32`) and thus allow for optimal storage allocation. For example, if the saved\n model is `float16`, ideally we want to load it back using the minimal amount of memory needed to load\n `float16` weights. Since the config object is stored in plain text, this attribute contains just the\n floating type string without the `torch.` prefix. For example, for `torch.float16` ``torch_dtype` is the\n `\"float16\"` string.\n\n This attribute is currently not being used during model loading time, but this may change in the future\n versions. But we can already start preparing for the future by saving the dtype with save_pretrained.\n\n > TensorFlow specific parameters\n\n use_bfloat16 (`bool`, *optional*, defaults to `False`):\n Whether or not the model should use BFloat16 scalars (only used by some TensorFlow models).\n tf_legacy_loss (`bool`, *optional*, defaults to `False`):\n Whether the model should use legacy TensorFlow losses. Legacy losses have variable output shapes and may\n not be XLA-compatible. 
This option is here for backward compatibility and will be removed in Transformers\n v5.\n \"\"\"\n model_type: str = \"\"\n is_composition: bool = False\n attribute_map: Dict[str, str] = {}\n _auto_class: Optional[str] = None\n\n def __setattr__(self, key, value):\n if key in super().__getattribute__(\"attribute_map\"):\n key = super().__getattribute__(\"attribute_map\")[key]\n super().__setattr__(key, value)\n\n def __getattribute__(self, key):\n if key != \"attribute_map\" and key in super().__getattribute__(\"attribute_map\"):\n key = super().__getattribute__(\"attribute_map\")[key]\n return super().__getattribute__(key)\n\n def __init__(self, **kwargs):\n # Attributes with defaults\n self.return_dict = kwargs.pop(\"return_dict\", True)\n self.output_hidden_states = kwargs.pop(\"output_hidden_states\", False)\n self.output_attentions = kwargs.pop(\"output_attentions\", False)\n self.torchscript = kwargs.pop(\"torchscript\", False) # Only used by PyTorch models\n self.torch_dtype = kwargs.pop(\"torch_dtype\", None) # Only used by PyTorch models\n self.use_bfloat16 = kwargs.pop(\"use_bfloat16\", False)\n self.tf_legacy_loss = kwargs.pop(\"tf_legacy_loss\", False) # Only used by TensorFlow models\n self.pruned_heads = kwargs.pop(\"pruned_heads\", {})\n self.tie_word_embeddings = kwargs.pop(\n \"tie_word_embeddings\", True\n ) # Whether input and output word embeddings should be tied for all MLM, LM and Seq2Seq models.\n\n # Is decoder is used in encoder-decoder models to differentiate encoder from decoder\n self.is_encoder_decoder = kwargs.pop(\"is_encoder_decoder\", False)\n self.is_decoder = kwargs.pop(\"is_decoder\", False)\n self.cross_attention_hidden_size = kwargs.pop(\"cross_attention_hidden_size\", None)\n self.add_cross_attention = kwargs.pop(\"add_cross_attention\", False)\n self.tie_encoder_decoder = kwargs.pop(\"tie_encoder_decoder\", False)\n\n # Parameters for sequence generation\n self.max_length = kwargs.pop(\"max_length\", 20)\n self.min_length = kwargs.pop(\"min_length\", 0)\n self.do_sample = kwargs.pop(\"do_sample\", False)\n self.early_stopping = kwargs.pop(\"early_stopping\", False)\n self.num_beams = kwargs.pop(\"num_beams\", 1)\n self.num_beam_groups = kwargs.pop(\"num_beam_groups\", 1)\n self.diversity_penalty = kwargs.pop(\"diversity_penalty\", 0.0)\n self.temperature = kwargs.pop(\"temperature\", 1.0)\n self.top_k = kwargs.pop(\"top_k\", 50)\n self.top_p = kwargs.pop(\"top_p\", 1.0)\n self.typical_p = kwargs.pop(\"typical_p\", 1.0)\n self.repetition_penalty = kwargs.pop(\"repetition_penalty\", 1.0)\n self.length_penalty = kwargs.pop(\"length_penalty\", 1.0)\n self.no_repeat_ngram_size = kwargs.pop(\"no_repeat_ngram_size\", 0)\n self.encoder_no_repeat_ngram_size = kwargs.pop(\"encoder_no_repeat_ngram_size\", 0)\n self.bad_words_ids = kwargs.pop(\"bad_words_ids\", None)\n self.num_return_sequences = kwargs.pop(\"num_return_sequences\", 1)\n self.chunk_size_feed_forward = kwargs.pop(\"chunk_size_feed_forward\", 0)\n self.output_scores = kwargs.pop(\"output_scores\", False)\n self.return_dict_in_generate = kwargs.pop(\"return_dict_in_generate\", False)\n self.forced_bos_token_id = kwargs.pop(\"forced_bos_token_id\", None)\n self.forced_eos_token_id = kwargs.pop(\"forced_eos_token_id\", None)\n self.remove_invalid_values = kwargs.pop(\"remove_invalid_values\", False)\n self.exponential_decay_length_penalty = kwargs.pop(\"exponential_decay_length_penalty\", None)\n self.suppress_tokens = kwargs.pop(\"suppress_tokens\", None)\n self.begin_suppress_tokens = 
kwargs.pop(\"begin_suppress_tokens\", None)\n\n # Fine-tuning task arguments\n self.architectures = kwargs.pop(\"architectures\", None)\n self.finetuning_task = kwargs.pop(\"finetuning_task\", None)\n self.id2label = kwargs.pop(\"id2label\", None)\n self.label2id = kwargs.pop(\"label2id\", None)\n if self.label2id is not None and not isinstance(self.label2id, dict):\n raise ValueError(\"Argument label2id should be a dictionary.\")\n if self.id2label is not None:\n if not isinstance(self.id2label, dict):\n raise ValueError(\"Argument id2label should be a dictionary.\")\n num_labels = kwargs.pop(\"num_labels\", None)\n if num_labels is not None and len(self.id2label) != num_labels:\n logger.warning(\n f\"You passed along `num_labels={num_labels}` with an incompatible id to label map: \"\n f\"{self.id2label}. The number of labels wil be overwritten to {self.num_labels}.\"\n )\n self.id2label = {int(key): value for key, value in self.id2label.items()}\n # Keys are always strings in JSON so convert ids to int here.\n else:\n self.num_labels = kwargs.pop(\"num_labels\", 2)\n\n if self.torch_dtype is not None and isinstance(self.torch_dtype, str):\n # we will start using self.torch_dtype in v5, but to be consistent with\n # from_pretrained's torch_dtype arg convert it to an actual torch.dtype object\n if is_torch_available():\n import torch\n\n self.torch_dtype = getattr(torch, self.torch_dtype)\n\n # Tokenizer arguments TODO: eventually tokenizer and models should share the same config\n self.tokenizer_class = kwargs.pop(\"tokenizer_class\", None)\n self.prefix = kwargs.pop(\"prefix\", None)\n self.bos_token_id = kwargs.pop(\"bos_token_id\", None)\n self.pad_token_id = kwargs.pop(\"pad_token_id\", None)\n self.eos_token_id = kwargs.pop(\"eos_token_id\", None)\n self.sep_token_id = kwargs.pop(\"sep_token_id\", None)\n\n self.decoder_start_token_id = kwargs.pop(\"decoder_start_token_id\", None)\n\n # task specific arguments\n self.task_specific_params = kwargs.pop(\"task_specific_params\", None)\n\n # regression / multi-label classification\n self.problem_type = kwargs.pop(\"problem_type\", None)\n allowed_problem_types = (\"regression\", \"single_label_classification\", \"multi_label_classification\")\n if self.problem_type is not None and self.problem_type not in allowed_problem_types:\n raise ValueError(\n f\"The config parameter `problem_type` was not understood: received {self.problem_type} \"\n \"but only 'regression', 'single_label_classification' and 'multi_label_classification' are valid.\"\n )\n\n # TPU arguments\n if kwargs.pop(\"xla_device\", None) is not None:\n logger.warning(\n \"The `xla_device` argument has been deprecated in v4.4.0 of Transformers. It is ignored and you can \"\n \"safely remove it from your `config.json` file.\"\n )\n\n # Name or path to the pretrained checkpoint\n self._name_or_path = str(kwargs.pop(\"name_or_path\", \"\"))\n # Config hash\n self._commit_hash = kwargs.pop(\"_commit_hash\", None)\n\n # Drop the transformers version info\n self.transformers_version = kwargs.pop(\"transformers_version\", None)\n\n # Deal with gradient checkpointing\n if kwargs.get(\"gradient_checkpointing\", False):\n warnings.warn(\n \"Passing `gradient_checkpointing` to a config initialization is deprecated and will be removed in v5 \"\n \"Transformers. 
Using `model.gradient_checkpointing_enable()` instead, or if you are using the \"\n \"`Trainer` API, pass `gradient_checkpointing=True` in your `TrainingArguments`.\"\n )\n\n # Additional attributes without default values\n for key, value in kwargs.items():\n try:\n setattr(self, key, value)\n except AttributeError as err:\n logger.error(f\"Can't set {key} with value {value} for {self}\")\n raise err\n\n @property\n def name_or_path(self) -> str:\n return getattr(self, \"_name_or_path\", None)\n\n @name_or_path.setter\n def name_or_path(self, value):\n self._name_or_path = str(value) # Make sure that name_or_path is a string (for JSON encoding)\n\n @property\n def use_return_dict(self) -> bool:\n \"\"\"\n `bool`: Whether or not return [`~utils.ModelOutput`] instead of tuples.\n \"\"\"\n # If torchscript is set, force `return_dict=False` to avoid jit errors\n return self.return_dict and not self.torchscript\n\n @property\n def num_labels(self) -> int:\n \"\"\"\n `int`: The number of labels for classification models.\n \"\"\"\n return len(self.id2label)\n\n @num_labels.setter\n def num_labels(self, num_labels: int):\n if not hasattr(self, \"id2label\") or self.id2label is None or len(self.id2label) != num_labels:\n self.id2label = {i: f\"LABEL_{i}\" for i in range(num_labels)}\n self.label2id = dict(zip(self.id2label.values(), self.id2label.keys()))\n\n def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):\n \"\"\"\n Save a configuration object to the directory `save_directory`, so that it can be re-loaded using the\n [`~PretrainedConfig.from_pretrained`] class method.\n\n Args:\n save_directory (`str` or `os.PathLike`):\n Directory where the configuration JSON file will be saved (will be created if it does not exist).\n push_to_hub (`bool`, *optional*, defaults to `False`):\n Whether or not to push your model to the Hugging Face model hub after saving it. 
You can specify the\n repository you want to push to with `repo_id` (will default to the name of `save_directory` in your\n namespace).\n kwargs (`Dict[str, Any]`, *optional*):\n Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.\n \"\"\"\n self._set_token_in_kwargs(kwargs)\n\n if os.path.isfile(save_directory):\n raise AssertionError(f\"Provided path ({save_directory}) should be a directory, not a file\")\n\n os.makedirs(save_directory, exist_ok=True)\n\n if push_to_hub:\n commit_message = kwargs.pop(\"commit_message\", None)\n repo_id = kwargs.pop(\"repo_id\", save_directory.split(os.path.sep)[-1])\n repo_id = self._create_repo(repo_id, **kwargs)\n files_timestamps = self._get_files_timestamps(save_directory)\n\n # If we have a custom config, we copy the file defining it in the folder and set the attributes so it can be\n # loaded from the Hub.\n if self._auto_class is not None:\n custom_object_save(self, save_directory, config=self)\n\n # If we save using the predefined names, we can load using `from_pretrained`\n output_config_file = os.path.join(save_directory, CONFIG_NAME)\n\n self.to_json_file(output_config_file, use_diff=True)\n logger.info(f\"Configuration saved in {output_config_file}\")\n\n if push_to_hub:\n self._upload_modified_files(\n save_directory,\n repo_id,\n files_timestamps,\n commit_message=commit_message,\n token=kwargs.get(\"token\"),\n )\n\n @staticmethod\n def _set_token_in_kwargs(kwargs, token=None):\n \"\"\"Temporary method to deal with `token` and `use_auth_token`.\n\n This method is to avoid apply the same changes in all model config classes that overwrite `from_pretrained`.\n\n Need to clean up `use_auth_token` in a follow PR.\n \"\"\"\n # Some model config classes like CLIP define their own `from_pretrained` without the new argument `token` yet.\n if token is None:\n token = kwargs.pop(\"token\", None)\n use_auth_token = kwargs.pop(\"use_auth_token\", None)\n\n if use_auth_token is not None:\n warnings.warn(\n \"The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers.\", FutureWarning\n )\n if token is not None:\n raise ValueError(\n \"`token` and `use_auth_token` are both specified. Please set only the argument `token`.\"\n )\n token = use_auth_token\n\n if token is not None:\n kwargs[\"token\"] = token\n\n @classmethod\n def from_pretrained(\n cls,\n pretrained_model_name_or_path: Union[str, os.PathLike],\n cache_dir: Optional[Union[str, os.PathLike]] = None,\n force_download: bool = False,\n local_files_only: bool = False,\n token: Optional[Union[str, bool]] = None,\n revision: str = \"main\",\n **kwargs,\n ) -> \"PretrainedConfig\":\n r\"\"\"\n Instantiate a [`PretrainedConfig`] (or a derived class) from a pretrained model configuration.\n\n Args:\n pretrained_model_name_or_path (`str` or `os.PathLike`):\n This can be either:\n\n - a string, the *model id* of a pretrained model configuration hosted inside a model repo on\n huggingface.co. 
Valid model ids can be located at the root-level, like `bert-base-uncased`, or\n namespaced under a user or organization name, like `dbmdz/bert-base-german-cased`.\n - a path to a *directory* containing a configuration file saved using the\n [`~PretrainedConfig.save_pretrained`] method, e.g., `./my_model_directory/`.\n - a path or url to a saved configuration JSON *file*, e.g., `./my_model_directory/configuration.json`.\n cache_dir (`str` or `os.PathLike`, *optional*):\n Path to a directory in which a downloaded pretrained model configuration should be cached if the\n standard cache should not be used.\n force_download (`bool`, *optional*, defaults to `False`):\n Whether or not to force to (re-)download the configuration files and override the cached versions if\n they exist.\n resume_download (`bool`, *optional*, defaults to `False`):\n Whether or not to delete incompletely received file. Attempts to resume the download if such a file\n exists.\n proxies (`Dict[str, str]`, *optional*):\n A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',\n 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.\n token (`str` or `bool`, *optional*):\n The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use\n the token generated when running `huggingface-cli login` (stored in `~/.huggingface`).\n revision (`str`, *optional*, defaults to `\"main\"`):\n The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a\n git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any\n identifier allowed by git.\n\n <Tip>\n\n To test a pull request you made on the Hub, you can pass `revision=\"refs/pr/<pr_number>\".\n\n </Tip>\n\n return_unused_kwargs (`bool`, *optional*, defaults to `False`):\n If `False`, then this function returns just the final configuration object.\n\n If `True`, then this functions returns a `Tuple(config, unused_kwargs)` where *unused_kwargs* is a\n dictionary consisting of the key/value pairs whose keys are not configuration attributes: i.e., the\n part of `kwargs` which has not been used to update `config` and is otherwise ignored.\n subfolder (`str`, *optional*, defaults to `\"\"`):\n In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can\n specify the folder name here.\n kwargs (`Dict[str, Any]`, *optional*):\n The values in kwargs of any keys which are configuration attributes will be used to override the loaded\n values. Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled\n by the `return_unused_kwargs` keyword parameter.\n\n Returns:\n [`PretrainedConfig`]: The configuration object instantiated from this pretrained model.\n\n Examples:\n\n ```python\n # We can't instantiate directly the base class *PretrainedConfig* so let's show the examples on a\n # derived class: BertConfig\n config = BertConfig.from_pretrained(\n \"bert-base-uncased\"\n ) # Download configuration from huggingface.co and cache.\n config = BertConfig.from_pretrained(\n \"./test/saved_model/\"\n ) # E.g. 
config (or model) was saved using *save_pretrained('./test/saved_model/')*\n config = BertConfig.from_pretrained(\"./test/saved_model/my_configuration.json\")\n config = BertConfig.from_pretrained(\"bert-base-uncased\", output_attentions=True, foo=False)\n assert config.output_attentions == True\n config, unused_kwargs = BertConfig.from_pretrained(\n \"bert-base-uncased\", output_attentions=True, foo=False, return_unused_kwargs=True\n )\n assert config.output_attentions == True\n assert unused_kwargs == {\"foo\": False}\n ```\"\"\"\n kwargs[\"cache_dir\"] = cache_dir\n kwargs[\"force_download\"] = force_download\n kwargs[\"local_files_only\"] = local_files_only\n kwargs[\"revision\"] = revision\n\n cls._set_token_in_kwargs(kwargs, token)\n\n config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)\n if \"model_type\" in config_dict and hasattr(cls, \"model_type\") and config_dict[\"model_type\"] != cls.model_type:\n logger.warning(\n f\"You are using a model of type {config_dict['model_type']} to instantiate a model of type \"\n f\"{cls.model_type}. This is not supported for all configurations of models and can yield errors.\"\n )\n\n return cls.from_dict(config_dict, **kwargs)\n\n @classmethod\n def get_config_dict(\n cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs\n ) -> Tuple[Dict[str, Any], Dict[str, Any]]:\n \"\"\"\n From a `pretrained_model_name_or_path`, resolve to a dictionary of parameters, to be used for instantiating a\n [`PretrainedConfig`] using `from_dict`.\n\n Parameters:\n pretrained_model_name_or_path (`str` or `os.PathLike`):\n The identifier of the pre-trained checkpoint from which we want the dictionary of parameters.\n\n Returns:\n `Tuple[Dict, Dict]`: The dictionary(ies) that will be used to instantiate the configuration object.\n\n \"\"\"\n cls._set_token_in_kwargs(kwargs)\n\n original_kwargs = copy.deepcopy(kwargs)\n # Get config dict associated with the base config file\n config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)\n if \"_commit_hash\" in config_dict:\n original_kwargs[\"_commit_hash\"] = config_dict[\"_commit_hash\"]\n\n # That config file may point us toward another config file to use.\n if \"configuration_files\" in config_dict:\n configuration_file = get_configuration_file(config_dict[\"configuration_files\"])\n config_dict, kwargs = cls._get_config_dict(\n pretrained_model_name_or_path, _configuration_file=configuration_file, **original_kwargs\n )\n\n return config_dict, kwargs\n\n @classmethod\n def _get_config_dict(\n cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs\n ) -> Tuple[Dict[str, Any], Dict[str, Any]]:\n cache_dir = kwargs.pop(\"cache_dir\", None)\n force_download = kwargs.pop(\"force_download\", False)\n resume_download = kwargs.pop(\"resume_download\", False)\n proxies = kwargs.pop(\"proxies\", None)\n token = kwargs.pop(\"token\", None)\n local_files_only = kwargs.pop(\"local_files_only\", False)\n revision = kwargs.pop(\"revision\", None)\n trust_remote_code = kwargs.pop(\"trust_remote_code\", None)\n subfolder = kwargs.pop(\"subfolder\", \"\")\n from_pipeline = kwargs.pop(\"_from_pipeline\", None)\n from_auto_class = kwargs.pop(\"_from_auto\", False)\n commit_hash = kwargs.pop(\"_commit_hash\", None)\n\n if trust_remote_code is True:\n logger.warning(\n \"The argument `trust_remote_code` is to be used with Auto classes. 
It has no effect here and is\"\n \" ignored.\"\n )\n\n user_agent = {\"file_type\": \"config\", \"from_auto_class\": from_auto_class}\n if from_pipeline is not None:\n user_agent[\"using_pipeline\"] = from_pipeline\n\n pretrained_model_name_or_path = str(pretrained_model_name_or_path)\n\n is_local = os.path.isdir(pretrained_model_name_or_path)\n if os.path.isfile(os.path.join(subfolder, pretrained_model_name_or_path)):\n # Special case when pretrained_model_name_or_path is a local file\n resolved_config_file = pretrained_model_name_or_path\n is_local = True\n elif is_remote_url(pretrained_model_name_or_path):\n configuration_file = pretrained_model_name_or_path\n resolved_config_file = download_url(pretrained_model_name_or_path)\n else:\n configuration_file = kwargs.pop(\"_configuration_file\", CONFIG_NAME)\n\n try:\n # Load from local folder or from cache or download from model Hub and cache\n resolved_config_file = cached_file(\n pretrained_model_name_or_path,\n configuration_file,\n cache_dir=cache_dir,\n force_download=force_download,\n proxies=proxies,\n resume_download=resume_download,\n local_files_only=local_files_only,\n token=token,\n user_agent=user_agent,\n revision=revision,\n subfolder=subfolder,\n _commit_hash=commit_hash,\n )\n commit_hash = extract_commit_hash(resolved_config_file, commit_hash)\n except EnvironmentError:\n # Raise any environment error raise by `cached_file`. It will have a helpful error message adapted to\n # the original exception.\n raise\n except Exception:\n # For any other exception, we throw a generic error.\n raise EnvironmentError(\n f\"Can't load the configuration of '{pretrained_model_name_or_path}'. If you were trying to load it\"\n \" from 'https://huggingface.co/models', make sure you don't have a local directory with the same\"\n f\" name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory\"\n f\" containing a {configuration_file} file\"\n )\n\n try:\n # Load config dict\n config_dict = cls._dict_from_json_file(resolved_config_file)\n config_dict[\"_commit_hash\"] = commit_hash\n except (json.JSONDecodeError, UnicodeDecodeError):\n raise EnvironmentError(\n f\"It looks like the config file at '{resolved_config_file}' is not a valid JSON file.\"\n )\n\n if is_local:\n logger.info(f\"loading configuration file {resolved_config_file}\")\n else:\n logger.info(f\"loading configuration file {configuration_file} from cache at {resolved_config_file}\")\n\n if \"auto_map\" in config_dict and not is_local:\n config_dict[\"auto_map\"] = add_model_info_to_auto_map(\n config_dict[\"auto_map\"], pretrained_model_name_or_path\n )\n return config_dict, kwargs\n\n @classmethod\n def from_dict(cls, config_dict: Dict[str, Any], **kwargs) -> \"PretrainedConfig\":\n \"\"\"\n Instantiates a [`PretrainedConfig`] from a Python dictionary of parameters.\n\n Args:\n config_dict (`Dict[str, Any]`):\n Dictionary that will be used to instantiate the configuration object. 
Such a dictionary can be\n retrieved from a pretrained checkpoint by leveraging the [`~PretrainedConfig.get_config_dict`] method.\n kwargs (`Dict[str, Any]`):\n Additional parameters from which to initialize the configuration object.\n\n Returns:\n [`PretrainedConfig`]: The configuration object instantiated from those parameters.\n \"\"\"\n return_unused_kwargs = kwargs.pop(\"return_unused_kwargs\", False)\n # Those arguments may be passed along for our internal telemetry.\n # We remove them so they don't appear in `return_unused_kwargs`.\n kwargs.pop(\"_from_auto\", None)\n kwargs.pop(\"_from_pipeline\", None)\n # The commit hash might have been updated in the `config_dict`, we don't want the kwargs to erase that update.\n if \"_commit_hash\" in kwargs and \"_commit_hash\" in config_dict:\n kwargs[\"_commit_hash\"] = config_dict[\"_commit_hash\"]\n\n config = cls(**config_dict)\n\n if hasattr(config, \"pruned_heads\"):\n config.pruned_heads = {int(key): value for key, value in config.pruned_heads.items()}\n\n # Update config with kwargs if needed\n if \"num_labels\" in kwargs and \"id2label\" in kwargs:\n num_labels = kwargs[\"num_labels\"]\n id2label = kwargs[\"id2label\"] if kwargs[\"id2label\"] is not None else []\n if len(id2label) != num_labels:\n raise ValueError(\n f\"You passed along `num_labels={num_labels }` with an incompatible id to label map: \"\n f\"{kwargs['id2label']}. Since those arguments are inconsistent with each other, you should remove \"\n \"one of them.\"\n )\n to_remove = []\n for key, value in kwargs.items():\n if hasattr(config, key):\n current_attr = getattr(config, key)\n # To authorize passing a custom subconfig as kwarg in models that have nested configs.\n if isinstance(current_attr, PretrainedConfig) and isinstance(value, dict):\n value = current_attr.__class__(**value)\n setattr(config, key, value)\n if key != \"torch_dtype\":\n to_remove.append(key)\n for key in to_remove:\n kwargs.pop(key, None)\n\n logger.info(f\"Model config {config}\")\n if return_unused_kwargs:\n return config, kwargs\n else:\n return config\n\n @classmethod\n def from_json_file(cls, json_file: Union[str, os.PathLike]) -> \"PretrainedConfig\":\n \"\"\"\n Instantiates a [`PretrainedConfig`] from the path to a JSON file of parameters.\n\n Args:\n json_file (`str` or `os.PathLike`):\n Path to the JSON file containing the parameters.\n\n Returns:\n [`PretrainedConfig`]: The configuration object instantiated from that JSON file.\n\n \"\"\"\n config_dict = cls._dict_from_json_file(json_file)\n return cls(**config_dict)\n\n @classmethod\n def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]):\n with open(json_file, \"r\", encoding=\"utf-8\") as reader:\n text = reader.read()\n return json.loads(text)\n\n def __eq__(self, other):\n return isinstance(other, PretrainedConfig) and (self.__dict__ == other.__dict__)\n\n def __repr__(self):\n return f\"{self.__class__.__name__} {self.to_json_string()}\"\n\n def to_diff_dict(self) -> Dict[str, Any]:\n \"\"\"\n Removes all attributes from config which correspond to the default config attributes for better readability and\n serializes to a Python dictionary.\n\n Returns:\n `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance,\n \"\"\"\n config_dict = self.to_dict()\n\n # get the default config dict\n default_config_dict = PretrainedConfig().to_dict()\n\n # get class specific config dict\n class_config_dict = self.__class__().to_dict() if not self.is_composition else {}\n\n serializable_config_dict = 
{}\n\n # only serialize values that differ from the default config\n for key, value in config_dict.items():\n if (\n isinstance(getattr(self, key, None), PretrainedConfig)\n and key in class_config_dict\n and isinstance(class_config_dict[key], dict)\n ):\n # For nested configs we need to clean the diff recursively\n diff = recursive_diff_dict(value, class_config_dict[key], config_obj=getattr(self, key, None))\n if \"model_type\" in value:\n # Needs to be set even if it's not in the diff\n diff[\"model_type\"] = value[\"model_type\"]\n if len(diff) > 0:\n serializable_config_dict[key] = diff\n elif (\n key not in default_config_dict\n or key == \"transformers_version\"\n or value != default_config_dict[key]\n or (key in class_config_dict and value != class_config_dict[key])\n ):\n serializable_config_dict[key] = value\n\n if hasattr(self, \"quantization_config\"):\n serializable_config_dict[\"quantization_config\"] = (\n self.quantization_config.to_dict()\n if not isinstance(self.quantization_config, dict)\n else self.quantization_config\n )\n\n self.dict_torch_dtype_to_str(serializable_config_dict)\n\n return serializable_config_dict\n\n def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes this instance to a Python dictionary.\n\n Returns:\n `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.\n \"\"\"\n output = copy.deepcopy(self.__dict__)\n if hasattr(self.__class__, \"model_type\"):\n output[\"model_type\"] = self.__class__.model_type\n if \"_auto_class\" in output:\n del output[\"_auto_class\"]\n if \"_commit_hash\" in output:\n del output[\"_commit_hash\"]\n\n # Transformers version when serializing the model\n output[\"transformers_version\"] = __version__\n\n for key, value in output.items():\n # Deal with nested configs like CLIP\n if isinstance(value, PretrainedConfig):\n value = value.to_dict()\n del value[\"transformers_version\"]\n\n output[key] = value\n\n if hasattr(self, \"quantization_config\"):\n output[\"quantization_config\"] = (\n self.quantization_config.to_dict()\n if not isinstance(self.quantization_config, dict)\n else self.quantization_config\n )\n\n self.dict_torch_dtype_to_str(output)\n\n return output\n\n def to_json_string(self, use_diff: bool = True) -> str:\n \"\"\"\n Serializes this instance to a JSON string.\n\n Args:\n use_diff (`bool`, *optional*, defaults to `True`):\n If set to `True`, only the difference between the config instance and the default `PretrainedConfig()`\n is serialized to JSON string.\n\n Returns:\n `str`: String containing all the attributes that make up this configuration instance in JSON format.\n \"\"\"\n if use_diff is True:\n config_dict = self.to_diff_dict()\n else:\n config_dict = self.to_dict()\n return json.dumps(config_dict, indent=2, sort_keys=True) + \"\\n\"\n\n def to_json_file(self, json_file_path: Union[str, os.PathLike], use_diff: bool = True):\n \"\"\"\n Save this instance to a JSON file.\n\n Args:\n json_file_path (`str` or `os.PathLike`):\n Path to the JSON file in which this configuration instance's parameters will be saved.\n use_diff (`bool`, *optional*, defaults to `True`):\n If set to `True`, only the difference between the config instance and the default `PretrainedConfig()`\n is serialized to JSON file.\n \"\"\"\n with open(json_file_path, \"w\", encoding=\"utf-8\") as writer:\n writer.write(self.to_json_string(use_diff=use_diff))\n\n def update(self, config_dict: Dict[str, Any]):\n \"\"\"\n Updates attributes of this class with attributes from `config_dict`.\n\n 
Args:\n config_dict (`Dict[str, Any]`): Dictionary of attributes that should be updated for this class.\n \"\"\"\n for key, value in config_dict.items():\n setattr(self, key, value)\n\n def update_from_string(self, update_str: str):\n \"\"\"\n Updates attributes of this class with attributes from `update_str`.\n\n The expected format is ints, floats and strings as is, and for booleans use `true` or `false`. For example:\n \"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index\"\n\n The keys to change have to already exist in the config object.\n\n Args:\n update_str (`str`): String with attributes that should be updated for this class.\n\n \"\"\"\n\n d = dict(x.split(\"=\") for x in update_str.split(\",\"))\n for k, v in d.items():\n if not hasattr(self, k):\n raise ValueError(f\"key {k} isn't in the original config dict\")\n\n old_v = getattr(self, k)\n if isinstance(old_v, bool):\n if v.lower() in [\"true\", \"1\", \"y\", \"yes\"]:\n v = True\n elif v.lower() in [\"false\", \"0\", \"n\", \"no\"]:\n v = False\n else:\n raise ValueError(f\"can't derive true or false from {v} (key {k})\")\n elif isinstance(old_v, int):\n v = int(v)\n elif isinstance(old_v, float):\n v = float(v)\n elif not isinstance(old_v, str):\n raise ValueError(\n f\"You can only update int, float, bool or string values in the config, got {v} for key {k}\"\n )\n\n setattr(self, k, v)\n\n def dict_torch_dtype_to_str(self, d: Dict[str, Any]) -> None:\n \"\"\"\n Checks whether the passed dictionary and its nested dicts have a *torch_dtype* key and if it's not None,\n converts torch.dtype to a string of just the type. For example, `torch.float32` get converted into *\"float32\"*\n string, which can then be stored in the json format.\n \"\"\"\n if d.get(\"torch_dtype\", None) is not None and not isinstance(d[\"torch_dtype\"], str):\n d[\"torch_dtype\"] = str(d[\"torch_dtype\"]).split(\".\")[1]\n for value in d.values():\n if isinstance(value, dict):\n self.dict_torch_dtype_to_str(value)\n\n @classmethod\n def register_for_auto_class(cls, auto_class=\"AutoConfig\"):\n \"\"\"\n Register this class with a given auto class. 
This should only be used for custom configurations as the ones in\n the library are already mapped with `AutoConfig`.\n\n <Tip warning={true}>\n\n This API is experimental and may have some slight breaking changes in the next releases.\n\n </Tip>\n\n Args:\n auto_class (`str` or `type`, *optional*, defaults to `\"AutoConfig\"`):\n The auto class to register this new configuration with.\n \"\"\"\n if not isinstance(auto_class, str):\n auto_class = auto_class.__name__\n\n import transformers.models.auto as auto_module\n\n if not hasattr(auto_module, auto_class):\n raise ValueError(f\"{auto_class} is not a valid auto class.\")\n\n cls._auto_class = auto_class" }, { "identifier": "logging", "path": "transformers/src/transformers/utils/logging.py", "snippet": "def _get_default_logging_level():\ndef _get_library_name() -> str:\ndef _get_library_root_logger() -> logging.Logger:\ndef _configure_library_root_logger() -> None:\ndef _reset_library_root_logger() -> None:\ndef get_log_levels_dict():\ndef get_logger(name: Optional[str] = None) -> logging.Logger:\ndef get_verbosity() -> int:\ndef set_verbosity(verbosity: int) -> None:\ndef set_verbosity_info():\ndef set_verbosity_warning():\ndef set_verbosity_debug():\ndef set_verbosity_error():\ndef disable_default_handler() -> None:\ndef enable_default_handler() -> None:\ndef add_handler(handler: logging.Handler) -> None:\ndef remove_handler(handler: logging.Handler) -> None:\ndef disable_propagation() -> None:\ndef enable_propagation() -> None:\ndef enable_explicit_format() -> None:\ndef reset_format() -> None:\ndef warning_advice(self, *args, **kwargs):\ndef warning_once(self, *args, **kwargs):\n def __init__(self, *args, **kwargs): # pylint: disable=unused-argument\n def __iter__(self):\n def __getattr__(self, _):\n def empty_fn(*args, **kwargs): # pylint: disable=unused-argument\n def __enter__(self):\n def __exit__(self, type_, value, traceback):\n def __call__(self, *args, **kwargs):\n def set_lock(self, *args, **kwargs):\n def get_lock(self):\ndef is_progress_bar_enabled() -> bool:\ndef enable_progress_bar():\ndef disable_progress_bar():\nclass EmptyTqdm:\nclass _tqdm_cls:" } ]
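The `configuration_utils` snippet quoted above shows two behaviours worth calling out: `num_labels` is never stored directly but derived from `id2label`, and `to_json_string(use_diff=True)` serializes only attributes that differ from a default `PretrainedConfig()`. A minimal sketch of both, assuming the `transformers` package is installed (the concrete values below are illustrative only):

```python
from transformers import PretrainedConfig

# Passing num_labels triggers the num_labels setter, which fabricates id2label/label2id.
config = PretrainedConfig(num_labels=3, problem_type="single_label_classification")
print(config.id2label)    # {0: 'LABEL_0', 1: 'LABEL_1', 2: 'LABEL_2'}
print(config.num_labels)  # 3, derived from len(id2label)

# With use_diff=True only attributes that differ from a default PretrainedConfig()
# are written out, which is what keeps a saved config.json small.
print(config.to_json_string(use_diff=True))

# Round-trip through a plain dict; num_labels is recovered from id2label.
restored = PretrainedConfig.from_dict(config.to_dict())
assert restored.num_labels == 3
```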
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging
12,443
# coding=utf-8 # Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ UniSpeech model configuration""" logger = logging.get_logger(__name__) UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = { "microsoft/unispeech-large-1500h-cv": ( "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json" ), # See all UniSpeech models at https://huggingface.co/models?filter=unispeech }
# coding=utf-8 # Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ UniSpeech model configuration""" logger = logging.get_logger(__name__) UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = { "microsoft/unispeech-large-1500h-cv": ( "https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json" ), # See all UniSpeech models at https://huggingface.co/models?filter=unispeech }
class UniSpeechConfig(PretrainedConfig):
0
2023-11-07 04:23:57+00:00
16k
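The target line of this record, `class UniSpeechConfig(PretrainedConfig):`, follows the standard pattern for model-specific configurations: subclass `PretrainedConfig`, set `model_type`, declare the model's hyperparameters in `__init__`, and forward any remaining keyword arguments to the base class. A minimal, hypothetical sketch of that pattern (the class name, `model_type`, and fields below are illustrative and not part of UniSpeech):

```python
from transformers import PretrainedConfig

class ToyAudioConfig(PretrainedConfig):
    # Hypothetical model type, used only to illustrate the subclassing pattern.
    model_type = "toy_audio"
    # attribute_map lets an alternate name resolve to the canonical attribute.
    attribute_map = {"hidden_dim": "hidden_size"}

    def __init__(self, hidden_size=768, num_hidden_layers=12, vocab_size=32, **kwargs):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.vocab_size = vocab_size
        # Generic attributes (pad_token_id, id2label, ...) are handled by the base class.
        super().__init__(**kwargs)

cfg = ToyAudioConfig(hidden_dim=512, pad_token_id=0)
assert cfg.hidden_size == 512   # routed through attribute_map
assert cfg.pad_token_id == 0    # consumed by PretrainedConfig.__init__
```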
HKU-BAL/ClairS-TO
src/realign_reads.py
[ { "identifier": "subprocess_popen", "path": "shared/utils.py", "snippet": "BASIC_BASES = set(\"ACGTU\")\nWARNING = '\\033[93m'\nERROR = '\\033[91m'\nENDC = '\\033[0m'\ndef log_error(log):\ndef log_warning(log):\ndef is_file_exists(file_name, suffix=\"\"):\ndef is_folder_exists(folder_name, suffix=\"\"):\ndef legal_range_from(param_name, x, min_num=None, max_num=None, exit_out_of_range=False):\ndef file_path_from(file_name, suffix=\"\", exit_on_not_found=False, sep=\"\", allow_none=False, is_directory=False):\ndef folder_path_from(folder_name, create_not_found=True, exit_on_not_found=False):\ndef is_command_exists(command):\ndef executable_command_string_from(command_to_execute, exit_on_not_found=False):\ndef subprocess_popen(args, stdin=None, stdout=PIPE, stderr=stderr, bufsize=8388608):\ndef str_none(v):\ndef str2bool(v):\ndef region_from(ctg_name, ctg_start=None, ctg_end=None):\ndef reference_sequence_from(samtools_execute_command, fasta_file_path, regions):\ndef vcf_candidates_from(vcf_fn, contig_name=None):\ndef candidate_position_generator_from(\n candidate,\n flanking_base_num,\n begin_to_end\n):\ndef samtools_mpileup_generator_from(\n candidate,\n flanking_base_num,\n begin_to_end\n):\ndef samtools_view_process_from(\n ctg_name,\n ctg_start,\n ctg_end,\n samtools,\n bam_file_path\n):\n def __init__(self, ctg_name=None,\n genotype1=None,\n genotype2=None,\n pos=None,\n ref_base=None,\n alt_base=None,\n candidate=False,\n cigar_count=None,\n confident_variant=False,\n depth=None,\n alt_list=None,\n af=None,\n filter=None,\n af_list=None,\n alt_type_mapping_dict=None,\n extra_infos=\"\",\n qual=None,\n row_str=None):\n def update_info(self, ref_base, alt_base, genotype, extra_infos=\"\"):\n def __init__(self, pos, ref_base, depth, af_list, alt_dict, tumor_alt_dict, extra_infos=\"\"):\n def __init__(self, handle):\n def __del__(self):\nclass Position(object):\nclass AltInfos(object):\nclass TensorStdout(object):" }, { "identifier": "bed_tree_from", "path": "shared/interval_tree.py", "snippet": "def bed_tree_from(bed_file_path,\n expand_region=None,\n contig_name=None,\n bed_ctg_start=None,\n bed_ctg_end=None,\n return_bed_region=False,\n padding=None,\n region=None):\n \"\"\"\n 0-based interval tree [start, end)\n \"\"\"\n\n tree = {}\n if region is not None:\n try:\n ctg_name, start_end = region.split(':')\n ctg_start, ctg_end = int(start_end.split('-')[0]) - 1, int(start_end.split('-')[1]) - 1 # bed format\n except:\n sys.exit(\"[ERROR] Please input the correct format for --region ctg_name:start-end, your input is {}\".format(region))\n if ctg_end < ctg_start or ctg_start < 0 or ctg_end < 0:\n sys.exit(\"[ERROR] Invalid region input: {}\".format(region))\n\n if ctg_name not in tree:\n tree[ctg_name] = IntervalTree()\n tree[ctg_name].addi(ctg_start, ctg_end)\n if return_bed_region:\n return tree, None, None\n return tree\n\n if bed_file_path is None or bed_file_path == \"\":\n if return_bed_region:\n return tree, None, None\n return tree\n\n bed_start, bed_end = float('inf'), 0\n unzip_process = subprocess_popen(shlex.split(\"gzip -fdc %s\" % (bed_file_path)))\n for row_id, row in enumerate(unzip_process.stdout):\n if row[0] == '#':\n continue\n columns = row.strip().split()\n\n ctg_name = columns[0]\n if contig_name != None and ctg_name != contig_name:\n continue\n if ctg_name not in tree:\n tree[ctg_name] = IntervalTree()\n\n ctg_start, ctg_end = int(columns[1]), int(columns[2])\n\n if ctg_end < ctg_start or ctg_start < 0 or ctg_end < 0:\n sys.exit(\"[ERROR] Invalid bed input in 
{}-th row {} {} {}\".format(row_id+1, ctg_name, ctg_start, ctg_end))\n\n if bed_ctg_start and bed_ctg_end:\n if ctg_end < bed_ctg_start or ctg_start > bed_ctg_end:\n continue\n if padding:\n ctg_start += padding\n ctg_end -= padding\n bed_start = min(ctg_start, bed_start)\n bed_end = max(ctg_end, bed_end)\n if ctg_start == ctg_end:\n ctg_end += 1\n\n tree[ctg_name].addi(ctg_start, ctg_end)\n\n unzip_process.stdout.close()\n unzip_process.wait()\n if return_bed_region:\n return tree, bed_start, bed_end\n return tree" }, { "identifier": "IntervalTree", "path": "shared/intervaltree/intervaltree.py", "snippet": "class IntervalTree(MutableSet):\n \"\"\"\n A binary lookup tree of intervals.\n The intervals contained in the tree are represented using ``Interval(a, b, data)`` objects.\n Each such object represents a half-open interval ``[a, b)`` with optional data.\n\n Examples:\n ---------\n\n Initialize a blank tree::\n\n >>> tree = IntervalTree()\n >>> tree\n IntervalTree()\n\n Initialize a tree from an iterable set of Intervals in O(n * log n)::\n\n >>> tree = IntervalTree([Interval(-10, 10), Interval(-20.0, -10.0)])\n >>> tree\n IntervalTree([Interval(-20.0, -10.0), Interval(-10, 10)])\n >>> len(tree)\n 2\n\n Note that this is a set, i.e. repeated intervals are ignored. However,\n Intervals with different data fields are regarded as different::\n\n >>> tree = IntervalTree([Interval(-10, 10), Interval(-10, 10), Interval(-10, 10, \"x\")])\n >>> tree\n IntervalTree([Interval(-10, 10), Interval(-10, 10, 'x')])\n >>> len(tree)\n 2\n\n Insertions::\n >>> tree = IntervalTree()\n >>> tree[0:1] = \"data\"\n >>> tree.add(Interval(10, 20))\n >>> tree.addi(19.9, 20)\n >>> tree\n IntervalTree([Interval(0, 1, 'data'), Interval(10, 20), Interval(19.9, 20)])\n >>> tree.update([Interval(19.9, 20.1), Interval(20.1, 30)])\n >>> len(tree)\n 5\n\n Inserting the same Interval twice does nothing::\n >>> tree = IntervalTree()\n >>> tree[-10:20] = \"arbitrary data\"\n >>> tree[-10:20] = None # Note that this is also an insertion\n >>> tree\n IntervalTree([Interval(-10, 20), Interval(-10, 20, 'arbitrary data')])\n >>> tree[-10:20] = None # This won't change anything\n >>> tree[-10:20] = \"arbitrary data\" # Neither will this\n >>> len(tree)\n 2\n\n Deletions::\n >>> tree = IntervalTree(Interval(b, e) for b, e in [(-10, 10), (-20, -10), (10, 20)])\n >>> tree\n IntervalTree([Interval(-20, -10), Interval(-10, 10), Interval(10, 20)])\n >>> tree.remove(Interval(-10, 10))\n >>> tree\n IntervalTree([Interval(-20, -10), Interval(10, 20)])\n >>> tree.remove(Interval(-10, 10))\n Traceback (most recent call last):\n ...\n ValueError\n >>> tree.discard(Interval(-10, 10)) # Same as remove, but no exception on failure\n >>> tree\n IntervalTree([Interval(-20, -10), Interval(10, 20)])\n\n Delete intervals, overlapping a given point::\n\n >>> tree = IntervalTree([Interval(-1.1, 1.1), Interval(-0.5, 1.5), Interval(0.5, 1.7)])\n >>> tree.remove_overlap(1.1)\n >>> tree\n IntervalTree([Interval(-1.1, 1.1)])\n\n Delete intervals, overlapping an interval::\n\n >>> tree = IntervalTree([Interval(-1.1, 1.1), Interval(-0.5, 1.5), Interval(0.5, 1.7)])\n >>> tree.remove_overlap(0, 0.5)\n >>> tree\n IntervalTree([Interval(0.5, 1.7)])\n >>> tree.remove_overlap(1.7, 1.8)\n >>> tree\n IntervalTree([Interval(0.5, 1.7)])\n >>> tree.remove_overlap(1.6, 1.6) # Null interval does nothing\n >>> tree\n IntervalTree([Interval(0.5, 1.7)])\n >>> tree.remove_overlap(1.6, 1.5) # Ditto\n >>> tree\n IntervalTree([Interval(0.5, 1.7)])\n\n Delete intervals, 
enveloped in the range::\n\n >>> tree = IntervalTree([Interval(-1.1, 1.1), Interval(-0.5, 1.5), Interval(0.5, 1.7)])\n >>> tree.remove_envelop(-1.0, 1.5)\n >>> tree\n IntervalTree([Interval(-1.1, 1.1), Interval(0.5, 1.7)])\n >>> tree.remove_envelop(-1.1, 1.5)\n >>> tree\n IntervalTree([Interval(0.5, 1.7)])\n >>> tree.remove_envelop(0.5, 1.5)\n >>> tree\n IntervalTree([Interval(0.5, 1.7)])\n >>> tree.remove_envelop(0.5, 1.7)\n >>> tree\n IntervalTree()\n\n Point queries::\n\n >>> tree = IntervalTree([Interval(-1.1, 1.1), Interval(-0.5, 1.5), Interval(0.5, 1.7)])\n >>> assert tree[-1.1] == set([Interval(-1.1, 1.1)])\n >>> assert tree.at(1.1) == set([Interval(-0.5, 1.5), Interval(0.5, 1.7)]) # Same as tree[1.1]\n >>> assert tree.at(1.5) == set([Interval(0.5, 1.7)]) # Same as tree[1.5]\n\n Interval overlap queries\n\n >>> tree = IntervalTree([Interval(-1.1, 1.1), Interval(-0.5, 1.5), Interval(0.5, 1.7)])\n >>> assert tree.overlap(1.7, 1.8) == set()\n >>> assert tree.overlap(1.5, 1.8) == set([Interval(0.5, 1.7)])\n >>> assert tree[1.5:1.8] == set([Interval(0.5, 1.7)]) # same as previous\n >>> assert tree.overlap(1.1, 1.8) == set([Interval(-0.5, 1.5), Interval(0.5, 1.7)])\n >>> assert tree[1.1:1.8] == set([Interval(-0.5, 1.5), Interval(0.5, 1.7)]) # same as previous\n\n Interval envelop queries::\n\n >>> tree = IntervalTree([Interval(-1.1, 1.1), Interval(-0.5, 1.5), Interval(0.5, 1.7)])\n >>> assert tree.envelop(-0.5, 0.5) == set()\n >>> assert tree.envelop(-0.5, 1.5) == set([Interval(-0.5, 1.5)])\n\n Membership queries::\n\n >>> tree = IntervalTree([Interval(-1.1, 1.1), Interval(-0.5, 1.5), Interval(0.5, 1.7)])\n >>> Interval(-0.5, 0.5) in tree\n False\n >>> Interval(-1.1, 1.1) in tree\n True\n >>> Interval(-1.1, 1.1, \"x\") in tree\n False\n >>> tree.overlaps(-1.1)\n True\n >>> tree.overlaps(1.7)\n False\n >>> tree.overlaps(1.7, 1.8)\n False\n >>> tree.overlaps(-1.2, -1.1)\n False\n >>> tree.overlaps(-1.2, -1.0)\n True\n\n Sizing::\n\n >>> tree = IntervalTree([Interval(-1.1, 1.1), Interval(-0.5, 1.5), Interval(0.5, 1.7)])\n >>> len(tree)\n 3\n >>> tree.is_empty()\n False\n >>> IntervalTree().is_empty()\n True\n >>> not tree\n False\n >>> not IntervalTree()\n True\n >>> print(tree.begin()) # using print() because of floats in Python 2.6\n -1.1\n >>> print(tree.end()) # ditto\n 1.7\n\n Iteration::\n\n >>> tree = IntervalTree([Interval(-11, 11), Interval(-5, 15), Interval(5, 17)])\n >>> [iv.begin for iv in sorted(tree)]\n [-11, -5, 5]\n >>> assert tree.items() == set([Interval(-5, 15), Interval(-11, 11), Interval(5, 17)])\n\n Copy- and typecasting, pickling::\n\n >>> tree0 = IntervalTree([Interval(0, 1, \"x\"), Interval(1, 2, [\"x\"])])\n >>> tree1 = IntervalTree(tree0) # Shares Interval objects\n >>> tree2 = tree0.copy() # Shallow copy (same as above, as Intervals are singletons)\n >>> import pickle\n >>> tree3 = pickle.loads(pickle.dumps(tree0)) # Deep copy\n >>> list(tree0[1])[0].data[0] = \"y\" # affects shallow copies, but not deep copies\n >>> tree0\n IntervalTree([Interval(0, 1, 'x'), Interval(1, 2, ['y'])])\n >>> tree1\n IntervalTree([Interval(0, 1, 'x'), Interval(1, 2, ['y'])])\n >>> tree2\n IntervalTree([Interval(0, 1, 'x'), Interval(1, 2, ['y'])])\n >>> tree3\n IntervalTree([Interval(0, 1, 'x'), Interval(1, 2, ['x'])])\n\n Equality testing::\n\n >>> IntervalTree([Interval(0, 1)]) == IntervalTree([Interval(0, 1)])\n True\n >>> IntervalTree([Interval(0, 1)]) == IntervalTree([Interval(0, 1, \"x\")])\n False\n \"\"\"\n @classmethod\n def from_tuples(cls, tups):\n \"\"\"\n Create a new 
IntervalTree from an iterable of 2- or 3-tuples,\n where the tuple lists begin, end, and optionally data.\n \"\"\"\n ivs = [Interval(*t) for t in tups]\n return IntervalTree(ivs)\n\n def __init__(self, intervals=None):\n \"\"\"\n Set up a tree. If intervals is provided, add all the intervals\n to the tree.\n\n Completes in O(n*log n) time.\n \"\"\"\n intervals = set(intervals) if intervals is not None else set()\n for iv in intervals:\n if iv.is_null():\n raise ValueError(\n \"IntervalTree: Null Interval objects not allowed in IntervalTree:\"\n \" {0}\".format(iv)\n )\n self.all_intervals = intervals\n self.top_node = Node.from_intervals(self.all_intervals)\n self.boundary_table = SortedDict()\n for iv in self.all_intervals:\n self._add_boundaries(iv)\n\n def copy(self):\n \"\"\"\n Construct a new IntervalTree using shallow copies of the\n intervals in the source tree.\n\n Completes in O(n*log n) time.\n :rtype: IntervalTree\n \"\"\"\n return IntervalTree(iv.copy() for iv in self)\n\n def _add_boundaries(self, interval):\n \"\"\"\n Records the boundaries of the interval in the boundary table.\n \"\"\"\n begin = interval.begin\n end = interval.end\n if begin in self.boundary_table:\n self.boundary_table[begin] += 1\n else:\n self.boundary_table[begin] = 1\n\n if end in self.boundary_table:\n self.boundary_table[end] += 1\n else:\n self.boundary_table[end] = 1\n\n def _remove_boundaries(self, interval):\n \"\"\"\n Removes the boundaries of the interval from the boundary table.\n \"\"\"\n begin = interval.begin\n end = interval.end\n if self.boundary_table[begin] == 1:\n del self.boundary_table[begin]\n else:\n self.boundary_table[begin] -= 1\n\n if self.boundary_table[end] == 1:\n del self.boundary_table[end]\n else:\n self.boundary_table[end] -= 1\n\n def add(self, interval):\n \"\"\"\n Adds an interval to the tree, if not already present.\n\n Completes in O(log n) time.\n \"\"\"\n if interval in self:\n return\n\n if interval.is_null():\n raise ValueError(\n \"IntervalTree: Null Interval objects not allowed in IntervalTree:\"\n \" {0}\".format(interval)\n )\n\n if not self.top_node:\n self.top_node = Node.from_interval(interval)\n else:\n self.top_node = self.top_node.add(interval)\n self.all_intervals.add(interval)\n self._add_boundaries(interval)\n append = add\n\n def addi(self, begin, end, data=None):\n \"\"\"\n Shortcut for add(Interval(begin, end, data)).\n\n Completes in O(log n) time.\n \"\"\"\n return self.add(Interval(begin, end, data))\n appendi = addi\n\n def update(self, intervals):\n \"\"\"\n Given an iterable of intervals, add them to the tree.\n\n Completes in O(m*log(n+m), where m = number of intervals to\n add.\n \"\"\"\n for iv in intervals:\n self.add(iv)\n\n def remove(self, interval):\n \"\"\"\n Removes an interval from the tree, if present. If not, raises\n ValueError.\n\n Completes in O(log n) time.\n \"\"\"\n #self.verify()\n if interval not in self:\n #print(self.all_intervals)\n raise ValueError\n self.top_node = self.top_node.remove(interval)\n self.all_intervals.remove(interval)\n self._remove_boundaries(interval)\n #self.verify()\n\n def removei(self, begin, end, data=None):\n \"\"\"\n Shortcut for remove(Interval(begin, end, data)).\n\n Completes in O(log n) time.\n \"\"\"\n return self.remove(Interval(begin, end, data))\n\n def discard(self, interval):\n \"\"\"\n Removes an interval from the tree, if present. 
If not, does\n nothing.\n\n Completes in O(log n) time.\n \"\"\"\n if interval not in self:\n return\n self.all_intervals.discard(interval)\n self.top_node = self.top_node.discard(interval)\n self._remove_boundaries(interval)\n\n def discardi(self, begin, end, data=None):\n \"\"\"\n Shortcut for discard(Interval(begin, end, data)).\n\n Completes in O(log n) time.\n \"\"\"\n return self.discard(Interval(begin, end, data))\n\n def difference(self, other):\n \"\"\"\n Returns a new tree, comprising all intervals in self but not\n in other.\n \"\"\"\n ivs = set()\n for iv in self:\n if iv not in other:\n ivs.add(iv)\n return IntervalTree(ivs)\n\n def difference_update(self, other):\n \"\"\"\n Removes all intervals in other from self.\n \"\"\"\n for iv in other:\n self.discard(iv)\n\n def union(self, other):\n \"\"\"\n Returns a new tree, comprising all intervals from self\n and other.\n \"\"\"\n return IntervalTree(set(self).union(other))\n\n def intersection(self, other):\n \"\"\"\n Returns a new tree of all intervals common to both self and\n other.\n \"\"\"\n ivs = set()\n shorter, longer = sorted([self, other], key=len)\n for iv in shorter:\n if iv in longer:\n ivs.add(iv)\n return IntervalTree(ivs)\n\n def intersection_update(self, other):\n \"\"\"\n Removes intervals from self unless they also exist in other.\n \"\"\"\n ivs = list(self)\n for iv in ivs:\n if iv not in other:\n self.remove(iv)\n\n def symmetric_difference(self, other):\n \"\"\"\n Return a tree with elements only in self or other but not\n both.\n \"\"\"\n if not isinstance(other, set): other = set(other)\n me = set(self)\n ivs = me.difference(other).union(other.difference(me))\n return IntervalTree(ivs)\n\n def symmetric_difference_update(self, other):\n \"\"\"\n Throws out all intervals except those only in self or other,\n not both.\n \"\"\"\n other = set(other)\n ivs = list(self)\n for iv in ivs:\n if iv in other:\n self.remove(iv)\n other.remove(iv)\n self.update(other)\n\n def remove_overlap(self, begin, end=None):\n \"\"\"\n Removes all intervals overlapping the given point or range.\n\n Completes in O((r+m)*log n) time, where:\n * n = size of the tree\n * m = number of matches\n * r = size of the search range (this is 1 for a point)\n \"\"\"\n hitlist = self.at(begin) if end is None else self.overlap(begin, end)\n for iv in hitlist:\n self.remove(iv)\n\n def remove_envelop(self, begin, end):\n \"\"\"\n Removes all intervals completely enveloped in the given range.\n\n Completes in O((r+m)*log n) time, where:\n * n = size of the tree\n * m = number of matches\n * r = size of the search range\n \"\"\"\n hitlist = self.envelop(begin, end)\n for iv in hitlist:\n self.remove(iv)\n\n def chop(self, begin, end, datafunc=None):\n \"\"\"\n Like remove_envelop(), but trims back Intervals hanging into\n the chopped area so that nothing overlaps.\n \"\"\"\n insertions = set()\n begin_hits = [iv for iv in self.at(begin) if iv.begin < begin]\n end_hits = [iv for iv in self.at(end) if iv.end > end]\n\n if datafunc:\n for iv in begin_hits:\n insertions.add(Interval(iv.begin, begin, datafunc(iv, True)))\n for iv in end_hits:\n insertions.add(Interval(end, iv.end, datafunc(iv, False)))\n else:\n for iv in begin_hits:\n insertions.add(Interval(iv.begin, begin, iv.data))\n for iv in end_hits:\n insertions.add(Interval(end, iv.end, iv.data))\n\n self.remove_envelop(begin, end)\n self.difference_update(begin_hits)\n self.difference_update(end_hits)\n self.update(insertions)\n\n def slice(self, point, datafunc=None):\n \"\"\"\n Split 
Intervals that overlap point into two new Intervals. if\n specified, uses datafunc(interval, islower=True/False) to\n set the data field of the new Intervals.\n :param point: where to slice\n :param datafunc(interval, isupper): callable returning a new\n value for the interval's data field\n \"\"\"\n hitlist = set(iv for iv in self.at(point) if iv.begin < point)\n insertions = set()\n if datafunc:\n for iv in hitlist:\n insertions.add(Interval(iv.begin, point, datafunc(iv, True)))\n insertions.add(Interval(point, iv.end, datafunc(iv, False)))\n else:\n for iv in hitlist:\n insertions.add(Interval(iv.begin, point, iv.data))\n insertions.add(Interval(point, iv.end, iv.data))\n self.difference_update(hitlist)\n self.update(insertions)\n\n def clear(self):\n \"\"\"\n Empties the tree.\n\n Completes in O(1) tine.\n \"\"\"\n self.__init__()\n\n def find_nested(self):\n \"\"\"\n Returns a dictionary mapping parent intervals to sets of\n intervals overlapped by and contained in the parent.\n\n Completes in O(n^2) time.\n :rtype: dict of [Interval, set of Interval]\n \"\"\"\n result = {}\n\n def add_if_nested():\n if parent.contains_interval(child):\n if parent not in result:\n result[parent] = set()\n result[parent].add(child)\n\n long_ivs = sorted(self.all_intervals, key=Interval.length, reverse=True)\n for i, parent in enumerate(long_ivs):\n for child in long_ivs[i + 1:]:\n add_if_nested()\n return result\n\n def overlaps(self, begin, end=None):\n \"\"\"\n Returns whether some interval in the tree overlaps the given\n point or range.\n\n Completes in O(r*log n) time, where r is the size of the\n search range.\n :rtype: bool\n \"\"\"\n if end is not None:\n return self.overlaps_range(begin, end)\n elif isinstance(begin, Number):\n return self.overlaps_point(begin)\n else:\n return self.overlaps_range(begin.begin, begin.end)\n\n def overlaps_point(self, p):\n \"\"\"\n Returns whether some interval in the tree overlaps p.\n\n Completes in O(log n) time.\n :rtype: bool\n \"\"\"\n if self.is_empty():\n return False\n return bool(self.top_node.contains_point(p))\n\n def overlaps_range(self, begin, end):\n \"\"\"\n Returns whether some interval in the tree overlaps the given\n range. Returns False if given a null interval over which to\n test.\n\n Completes in O(r*log n) time, where r is the range length and n\n is the table size.\n :rtype: bool\n \"\"\"\n if self.is_empty():\n return False\n elif begin >= end:\n return False\n elif self.overlaps_point(begin):\n return True\n return any(\n self.overlaps_point(bound)\n for bound in self.boundary_table\n if begin < bound < end\n )\n\n def split_overlaps(self):\n \"\"\"\n Finds all intervals with overlapping ranges and splits them\n along the range boundaries.\n\n Completes in worst-case O(n^2*log n) time (many interval\n boundaries are inside many intervals), best-case O(n*log n)\n time (small number of overlaps << n per interval).\n \"\"\"\n if not self:\n return\n if len(self.boundary_table) == 2:\n return\n\n bounds = sorted(self.boundary_table) # get bound locations\n\n new_ivs = set()\n for lbound, ubound in zip(bounds[:-1], bounds[1:]):\n for iv in self[lbound]:\n new_ivs.add(Interval(lbound, ubound, iv.data))\n\n self.__init__(new_ivs)\n\n def merge_overlaps(self, data_reducer=None, data_initializer=None, strict=True):\n \"\"\"\n Finds all intervals with overlapping ranges and merges them\n into a single interval. 
If provided, uses data_reducer and\n data_initializer with similar semantics to Python's built-in\n reduce(reducer_func[, initializer]), as follows:\n\n If data_reducer is set to a function, combines the data\n fields of the Intervals with\n current_reduced_data = data_reducer(current_reduced_data, new_data)\n If data_reducer is None, the merged Interval's data\n field will be set to None, ignoring all the data fields\n of the merged Intervals.\n\n On encountering the first Interval to merge, if\n data_initializer is None (default), uses the first\n Interval's data field as the first value for\n current_reduced_data. If data_initializer is not None,\n current_reduced_data is set to a shallow copy of\n data_initializer created with copy.copy(data_initializer).\n\n If strict is True (default), intervals are only merged if\n their ranges actually overlap; adjacent, touching intervals\n will not be merged. If strict is False, intervals are merged\n even if they are only end-to-end adjacent.\n\n Completes in O(n*logn).\n \"\"\"\n if not self:\n return\n\n sorted_intervals = sorted(self.all_intervals) # get sorted intervals\n merged = []\n # use mutable object to allow new_series() to modify it\n current_reduced = [None]\n higher = None # iterating variable, which new_series() needs access to\n\n def new_series():\n if data_initializer is None:\n current_reduced[0] = higher.data\n merged.append(higher)\n return\n else: # data_initializer is not None\n current_reduced[0] = copy(data_initializer)\n current_reduced[0] = data_reducer(current_reduced[0], higher.data)\n merged.append(Interval(higher.begin, higher.end, current_reduced[0]))\n\n for higher in sorted_intervals:\n if merged: # series already begun\n lower = merged[-1]\n if (higher.begin < lower.end or\n not strict and higher.begin == lower.end): # should merge\n upper_bound = max(lower.end, higher.end)\n if data_reducer is not None:\n current_reduced[0] = data_reducer(current_reduced[0], higher.data)\n else: # annihilate the data, since we don't know how to merge it\n current_reduced[0] = None\n merged[-1] = Interval(lower.begin, upper_bound, current_reduced[0])\n else:\n new_series()\n else: # not merged; is first of Intervals to merge\n new_series()\n\n self.__init__(merged)\n\n def merge_equals(self, data_reducer=None, data_initializer=None):\n \"\"\"\n Finds all intervals with equal ranges and merges them\n into a single interval. If provided, uses data_reducer and\n data_initializer with similar semantics to Python's built-in\n reduce(reducer_func[, initializer]), as follows:\n\n If data_reducer is set to a function, combines the data\n fields of the Intervals with\n current_reduced_data = data_reducer(current_reduced_data, new_data)\n If data_reducer is None, the merged Interval's data\n field will be set to None, ignoring all the data fields\n of the merged Intervals.\n\n On encountering the first Interval to merge, if\n data_initializer is None (default), uses the first\n Interval's data field as the first value for\n current_reduced_data. 
If data_initializer is not None,\n current_reduced_data is set to a shallow copy of\n data_initiazer created with\n copy.copy(data_initializer).\n\n Completes in O(n*logn).\n \"\"\"\n if not self:\n return\n\n sorted_intervals = sorted(self.all_intervals) # get sorted intervals\n merged = []\n # use mutable object to allow new_series() to modify it\n current_reduced = [None]\n higher = None # iterating variable, which new_series() needs access to\n\n def new_series():\n if data_initializer is None:\n current_reduced[0] = higher.data\n merged.append(higher)\n return\n else: # data_initializer is not None\n current_reduced[0] = copy(data_initializer)\n current_reduced[0] = data_reducer(current_reduced[0], higher.data)\n merged.append(Interval(higher.begin, higher.end, current_reduced[0]))\n\n for higher in sorted_intervals:\n if merged: # series already begun\n lower = merged[-1]\n if higher.range_matches(lower): # should merge\n upper_bound = max(lower.end, higher.end)\n if data_reducer is not None:\n current_reduced[0] = data_reducer(current_reduced[0], higher.data)\n else: # annihilate the data, since we don't know how to merge it\n current_reduced[0] = None\n merged[-1] = Interval(lower.begin, upper_bound, current_reduced[0])\n else:\n new_series()\n else: # not merged; is first of Intervals to merge\n new_series()\n\n self.__init__(merged)\n\n def items(self):\n \"\"\"\n Constructs and returns a set of all intervals in the tree.\n\n Completes in O(n) time.\n :rtype: set of Interval\n \"\"\"\n return set(self.all_intervals)\n\n def is_empty(self):\n \"\"\"\n Returns whether the tree is empty.\n\n Completes in O(1) time.\n :rtype: bool\n \"\"\"\n return 0 == len(self)\n\n def at(self, p):\n \"\"\"\n Returns the set of all intervals that contain p.\n\n Completes in O(m + log n) time, where:\n * n = size of the tree\n * m = number of matches\n :rtype: set of Interval\n \"\"\"\n root = self.top_node\n if not root:\n return set()\n return root.search_point(p, set())\n\n def envelop(self, begin, end=None):\n \"\"\"\n Returns the set of all intervals fully contained in the range\n [begin, end).\n\n Completes in O(m + k*log n) time, where:\n * n = size of the tree\n * m = number of matches\n * k = size of the search range\n :rtype: set of Interval\n \"\"\"\n root = self.top_node\n if not root:\n return set()\n if end is None:\n iv = begin\n return self.envelop(iv.begin, iv.end)\n elif begin >= end:\n return set()\n result = root.search_point(begin, set()) # bound_begin might be greater\n boundary_table = self.boundary_table\n bound_begin = boundary_table.bisect_left(begin)\n bound_end = boundary_table.bisect_left(end) # up to, but not including end\n result.update(root.search_overlap(\n # slice notation is slightly slower\n boundary_table.keys()[index] for index in xrange(bound_begin, bound_end)\n ))\n\n # TODO: improve envelop() to use node info instead of less-efficient filtering\n result = set(\n iv for iv in result\n if iv.begin >= begin and iv.end <= end\n )\n return result\n\n def overlap(self, begin, end=None):\n \"\"\"\n Returns a set of all intervals overlapping the given range.\n\n Completes in O(m + k*log n) time, where:\n * n = size of the tree\n * m = number of matches\n * k = size of the search range\n :rtype: set of Interval\n \"\"\"\n root = self.top_node\n if not root:\n return set()\n if end is None:\n iv = begin\n return self.overlap(iv.begin, iv.end)\n elif begin >= end:\n return set()\n result = root.search_point(begin, set()) # bound_begin might be greater\n boundary_table 
= self.boundary_table\n bound_begin = boundary_table.bisect_left(begin)\n bound_end = boundary_table.bisect_left(end) # up to, but not including end\n result.update(root.search_overlap(\n # slice notation is slightly slower\n boundary_table.keys()[index] for index in xrange(bound_begin, bound_end)\n ))\n return result\n\n def begin(self):\n \"\"\"\n Returns the lower bound of the first interval in the tree.\n\n Completes in O(1) time.\n \"\"\"\n if not self.boundary_table:\n return 0\n return self.boundary_table.keys()[0]\n\n def end(self):\n \"\"\"\n Returns the upper bound of the last interval in the tree.\n\n Completes in O(1) time.\n \"\"\"\n if not self.boundary_table:\n return 0\n return self.boundary_table.keys()[-1]\n\n def range(self):\n \"\"\"\n Returns a minimum-spanning Interval that encloses all the\n members of this IntervalTree. If the tree is empty, returns\n null Interval.\n :rtype: Interval\n \"\"\"\n return Interval(self.begin(), self.end())\n\n def span(self):\n \"\"\"\n Returns the length of the minimum-spanning Interval that\n encloses all the members of this IntervalTree. If the tree\n is empty, return 0.\n \"\"\"\n if not self:\n return 0\n return self.end() - self.begin()\n\n def print_structure(self, tostring=False):\n \"\"\"\n ## FOR DEBUGGING ONLY ##\n Pretty-prints the structure of the tree.\n If tostring is true, prints nothing and returns a string.\n :rtype: None or str\n \"\"\"\n if self.top_node:\n return self.top_node.print_structure(tostring=tostring)\n else:\n result = \"<empty IntervalTree>\"\n if not tostring:\n print(result)\n else:\n return result\n\n def verify(self):\n \"\"\"\n ## FOR DEBUGGING ONLY ##\n Checks the table to ensure that the invariants are held.\n \"\"\"\n if self.all_intervals:\n ## top_node.all_children() == self.all_intervals\n try:\n assert self.top_node.all_children() == self.all_intervals\n except AssertionError as e:\n print(\n 'Error: the tree and the membership set are out of sync!'\n )\n tivs = set(self.top_node.all_children())\n print('top_node.all_children() - all_intervals:')\n try:\n pprint\n except NameError:\n from pprint import pprint\n pprint(tivs - self.all_intervals)\n print('all_intervals - top_node.all_children():')\n pprint(self.all_intervals - tivs)\n raise e\n\n ## All members are Intervals\n for iv in self:\n assert isinstance(iv, Interval), (\n \"Error: Only Interval objects allowed in IntervalTree:\"\n \" {0}\".format(iv)\n )\n\n ## No null intervals\n for iv in self:\n assert not iv.is_null(), (\n \"Error: Null Interval objects not allowed in IntervalTree:\"\n \" {0}\".format(iv)\n )\n\n ## Reconstruct boundary_table\n bound_check = {}\n for iv in self:\n if iv.begin in bound_check:\n bound_check[iv.begin] += 1\n else:\n bound_check[iv.begin] = 1\n if iv.end in bound_check:\n bound_check[iv.end] += 1\n else:\n bound_check[iv.end] = 1\n\n ## Reconstructed boundary table (bound_check) ==? 
boundary_table\n assert set(self.boundary_table.keys()) == set(bound_check.keys()),\\\n 'Error: boundary_table is out of sync with ' \\\n 'the intervals in the tree!'\n\n # For efficiency reasons this should be iteritems in Py2, but we\n # don't care much for efficiency in debug methods anyway.\n for key, val in self.boundary_table.items():\n assert bound_check[key] == val, \\\n 'Error: boundary_table[{0}] should be {1},' \\\n ' but is {2}!'.format(\n key, bound_check[key], val)\n\n ## Internal tree structure\n self.top_node.verify(set())\n else:\n ## Verify empty tree\n assert not self.boundary_table, \\\n \"Error: boundary table should be empty!\"\n assert self.top_node is None, \\\n \"Error: top_node isn't None!\"\n\n def score(self, full_report=False):\n \"\"\"\n Returns a number between 0 and 1, indicating how suboptimal the tree\n is. The lower, the better. Roughly, this number represents the\n fraction of flawed Intervals in the tree.\n :rtype: float\n \"\"\"\n if len(self) <= 2:\n return 0.0\n\n n = len(self)\n m = self.top_node.count_nodes()\n\n def s_center_score():\n \"\"\"\n Returns a normalized score, indicating roughly how many times\n intervals share s_center with other intervals. Output is full-scale\n from 0 to 1.\n :rtype: float\n \"\"\"\n raw = n - m\n maximum = n - 1\n return raw / float(maximum)\n\n report = {\n \"depth\": self.top_node.depth_score(n, m),\n \"s_center\": s_center_score(),\n }\n cumulative = max(report.values())\n report[\"_cumulative\"] = cumulative\n if full_report:\n return report\n return cumulative\n\n\n def __getitem__(self, index):\n \"\"\"\n Returns a set of all intervals overlapping the given index or\n slice.\n\n Completes in O(k * log(n) + m) time, where:\n * n = size of the tree\n * m = number of matches\n * k = size of the search range (this is 1 for a point)\n :rtype: set of Interval\n \"\"\"\n try:\n start, stop = index.start, index.stop\n if start is None:\n start = self.begin()\n if stop is None:\n return set(self)\n if stop is None:\n stop = self.end()\n return self.overlap(start, stop)\n except AttributeError:\n return self.at(index)\n\n def __setitem__(self, index, value):\n \"\"\"\n Adds a new interval to the tree. 
A shortcut for\n add(Interval(index.start, index.stop, value)).\n\n If an identical Interval object with equal range and data\n already exists, does nothing.\n\n Completes in O(log n) time.\n \"\"\"\n self.addi(index.start, index.stop, value)\n\n def __delitem__(self, point):\n \"\"\"\n Delete all items overlapping point.\n \"\"\"\n self.remove_overlap(point)\n\n def __contains__(self, item):\n \"\"\"\n Returns whether item exists as an Interval in the tree.\n This method only returns True for exact matches; for\n overlaps, see the overlaps() method.\n\n Completes in O(1) time.\n :rtype: bool\n \"\"\"\n # Removed point-checking code; it might trick the user into\n # thinking that this is O(1), which point-checking isn't.\n #if isinstance(item, Interval):\n return item in self.all_intervals\n #else:\n # return self.contains_point(item)\n\n def containsi(self, begin, end, data=None):\n \"\"\"\n Shortcut for (Interval(begin, end, data) in tree).\n\n Completes in O(1) time.\n :rtype: bool\n \"\"\"\n return Interval(begin, end, data) in self\n\n def __iter__(self):\n \"\"\"\n Returns an iterator over all the intervals in the tree.\n\n Completes in O(1) time.\n :rtype: collections.Iterable[Interval]\n \"\"\"\n return self.all_intervals.__iter__()\n iter = __iter__\n\n def __len__(self):\n \"\"\"\n Returns how many intervals are in the tree.\n\n Completes in O(1) time.\n :rtype: int\n \"\"\"\n return len(self.all_intervals)\n\n def __eq__(self, other):\n \"\"\"\n Whether two IntervalTrees are equal.\n\n Completes in O(n) time if sizes are equal; O(1) time otherwise.\n :rtype: bool\n \"\"\"\n return (\n isinstance(other, IntervalTree) and\n self.all_intervals == other.all_intervals\n )\n\n def __repr__(self):\n \"\"\"\n :rtype: str\n \"\"\"\n ivs = sorted(self)\n if not ivs:\n return \"IntervalTree()\"\n else:\n return \"IntervalTree({0})\".format(ivs)\n\n __str__ = __repr__\n\n def __reduce__(self):\n \"\"\"\n For pickle-ing.\n :rtype: tuple\n \"\"\"\n return IntervalTree, (sorted(self.all_intervals),)" } ]
import sys
import os
import shlex
import ctypes
import re
import subprocess
import shared.param as param
from subprocess import PIPE
from argparse import ArgumentParser, SUPPRESS
from collections import defaultdict
from shared.utils import subprocess_popen, reference_sequence_from, IUPAC_base_to_ACGT_base_dict as BASE2ACGT, log_error
from shared.interval_tree import bed_tree_from
from shared.intervaltree.intervaltree import IntervalTree
12,982
class DBGPointer(ctypes.Structure): _fields_ = [("consensus_size", ctypes.c_int), ("consensus", ctypes.c_char_p * 200), ] # Read class for storing read information cigar_indel_re = r"(\d+)(D)" cigarRe = r"(\d+)([MIDNSHP=X])" graph_min_mapping_quality = 14 def get_len(seq, cigar): if 'D' not in cigar: return len(seq) indel_length = 0 for m in re.finditer(cigar_indel_re, cigar): indel_length += int(m.group(1)) return len(seq) + indel_length def print_ed(s1, s2): match_str = "" for x, y in zip(s1, s2): if x == y: match_str += "|" else: match_str += " " print(s1) print(match_str) print(s2) class Read(object): def __init__(self, read_start, seq, cigar, mapping_quality, base_quality, strand, raw_base_quality=None, unalign=False, read_name=None, read_id=None, flag=None, RNEXT=0, PNEXT=0, TLEN=0, phasing=None): self.read_start = read_start self.cigar = cigar self.mapping_quality = mapping_quality self.seq = seq self.base_quality = base_quality self.read_id = read_id self.read_end = self.read_start + get_len(seq, cigar) self.strand = strand self.graph_mq = True if self.mapping_quality >= graph_min_mapping_quality else False self.raw_base_quality = raw_base_quality self.read_name = read_name self.region = {} self.region_cigar = None self.region_start = None self.flag = str(flag) self.RNEXT = RNEXT self.PNEXT = PNEXT self.TLEN = PNEXT self.test_pos = None self.best_cigar = cigar self.best_pos = read_start self.best_align_score = None self.phasing = phasing def set_realign_flag(self): self.unalign = True def count_align_score(self, cigar): score = 0 for m in re.finditer(cigarRe, cigar): l, op, = int(m.group(1)), m.group(2) if op in 'MX=S': continue elif op in 'ID': score += l return score def set_realignment_info(self, region_start, realignment_cigar, realignment_start): realignment_cigar = realignment_cigar.replace('X', 'M') if realignment_cigar == self.cigar and realignment_start == self.read_start: return if self.best_align_score and realignment_cigar == self.best_cigar and realignment_start == self.best_pos: return realignment_align_score = self.count_align_score(realignment_cigar) if not self.best_align_score or realignment_align_score >= self.best_align_score: self.best_cigar = realignment_cigar self.best_pos = realignment_start self.best_align_score = realignment_align_score def decode_region(self, region_str): if region_str == '-' or '-' not in region_str: return region_str = region_str.rstrip().split('_') for region in region_str: region, cigar, pos = region.split('-') region, pos = int(region), int(pos) self.region[region] = [cigar, pos] def byte(x): return bytes(x, encoding="utf8") def find_max_overlap_index(query_region, search_regions): def overlap_length(region1, region2): return max(0, (min(region1[1], region2[1]) - max(region1[0], region2[0]))) overlap_lengths = [overlap_length(query_region, search_region) for search_region in search_regions] argmax = max(range(len(search_regions)), key=lambda idx: overlap_lengths[idx]) return None if overlap_lengths[argmax] == 0 else argmax def get_reference_seq(sequence, start, end, reference_start_0_based): if end < start: end, start = start, end return sequence[start - reference_start_0_based: end - reference_start_0_based] def phredscore2raw_score(qual): return ord(qual) - 33 def evc_base_from(base):
# BSD 3-Clause License # # Copyright 2023 The University of Hong Kong, Department of Computer Science # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. realign_chunk_size = 5000 min_dbg_mapping_quality = min_dbg_base_quality = 20 region_expansion_in_bp = expand_align_ref_region = 20 min_windows_distance = expand_align_ref_region * 4 max_window_size = max_region_reads_num = 1000 expandReferenceRegion = 100000 realigner_mod = os.path.join(*(os.path.split(__file__)[:-1] + ('realign/realigner',))) dbg_mod = os.path.join(*(os.path.split(__file__)[:-1] + ('realign/debruijn_graph',))) if not os.path.exists(realigner_mod) or not os.path.exists(dbg_mod): # try to find modules in clair3 python_path = subprocess.run('which python', stdout=subprocess.PIPE, shell=True).stdout.decode().rstrip() conda_prefix = os.path.dirname(os.path.dirname(python_path)) clair3_realign_path = os.path.join(conda_prefix, 'bin', 'preprocess', 'realign') clair3_realigner_mod = os.path.join(clair3_realign_path, 'realigner') clair3_dbg_mod = os.path.join(clair3_realign_path, 'debruijn_graph') if os.path.exists(clair3_realigner_mod) and os.path.exists(clair3_dbg_mod): realigner_mod = clair3_realigner_mod dbg_mod = clair3_dbg_mod else: print(log_error("[ERROR] `realigner` or `debruijn_graph` submodule not found in conda environment, pls install clair3-illumina package!")) sys.exit(1) realigner = ctypes.cdll.LoadLibrary(realigner_mod) dbg = ctypes.cdll.LoadLibrary(dbg_mod) class StructPointer(ctypes.Structure): _fields_ = [("position", ctypes.c_int * max_region_reads_num), ("cigar_string", ctypes.c_char_p * max_region_reads_num), ] class DBGPointer(ctypes.Structure): _fields_ = [("consensus_size", ctypes.c_int), ("consensus", ctypes.c_char_p * 200), ] # Read class for storing read information cigar_indel_re = r"(\d+)(D)" cigarRe = r"(\d+)([MIDNSHP=X])" graph_min_mapping_quality = 14 def get_len(seq, cigar): if 'D' not in cigar: return len(seq) indel_length = 0 for m in re.finditer(cigar_indel_re, cigar): indel_length += int(m.group(1)) return len(seq) + indel_length def print_ed(s1, s2): match_str = "" for x, 
y in zip(s1, s2): if x == y: match_str += "|" else: match_str += " " print(s1) print(match_str) print(s2) class Read(object): def __init__(self, read_start, seq, cigar, mapping_quality, base_quality, strand, raw_base_quality=None, unalign=False, read_name=None, read_id=None, flag=None, RNEXT=0, PNEXT=0, TLEN=0, phasing=None): self.read_start = read_start self.cigar = cigar self.mapping_quality = mapping_quality self.seq = seq self.base_quality = base_quality self.read_id = read_id self.read_end = self.read_start + get_len(seq, cigar) self.strand = strand self.graph_mq = True if self.mapping_quality >= graph_min_mapping_quality else False self.raw_base_quality = raw_base_quality self.read_name = read_name self.region = {} self.region_cigar = None self.region_start = None self.flag = str(flag) self.RNEXT = RNEXT self.PNEXT = PNEXT self.TLEN = PNEXT self.test_pos = None self.best_cigar = cigar self.best_pos = read_start self.best_align_score = None self.phasing = phasing def set_realign_flag(self): self.unalign = True def count_align_score(self, cigar): score = 0 for m in re.finditer(cigarRe, cigar): l, op, = int(m.group(1)), m.group(2) if op in 'MX=S': continue elif op in 'ID': score += l return score def set_realignment_info(self, region_start, realignment_cigar, realignment_start): realignment_cigar = realignment_cigar.replace('X', 'M') if realignment_cigar == self.cigar and realignment_start == self.read_start: return if self.best_align_score and realignment_cigar == self.best_cigar and realignment_start == self.best_pos: return realignment_align_score = self.count_align_score(realignment_cigar) if not self.best_align_score or realignment_align_score >= self.best_align_score: self.best_cigar = realignment_cigar self.best_pos = realignment_start self.best_align_score = realignment_align_score def decode_region(self, region_str): if region_str == '-' or '-' not in region_str: return region_str = region_str.rstrip().split('_') for region in region_str: region, cigar, pos = region.split('-') region, pos = int(region), int(pos) self.region[region] = [cigar, pos] def byte(x): return bytes(x, encoding="utf8") def find_max_overlap_index(query_region, search_regions): def overlap_length(region1, region2): return max(0, (min(region1[1], region2[1]) - max(region1[0], region2[0]))) overlap_lengths = [overlap_length(query_region, search_region) for search_region in search_regions] argmax = max(range(len(search_regions)), key=lambda idx: overlap_lengths[idx]) return None if overlap_lengths[argmax] == 0 else argmax def get_reference_seq(sequence, start, end, reference_start_0_based): if end < start: end, start = start, end return sequence[start - reference_start_0_based: end - reference_start_0_based] def phredscore2raw_score(qual): return ord(qual) - 33 def evc_base_from(base):
return base if base == "N" else BASE2ACGT[base]
3
2023-11-07 04:39:16+00:00
16k
the-siesta-group/edfio
tests/test_edfplus_header.py
[ { "identifier": "AnonymizedDateError", "path": "edfio/edf.py", "snippet": "class AnonymizedDateError(ValueError):\n \"\"\"Raised when trying to access an anonymized startdate or birthdate.\"\"\"" }, { "identifier": "Edf", "path": "edfio/edf.py", "snippet": "class Edf:\n \"\"\"Python representation of an EDF file.\n\n EDF header fields are exposed as properties with appropriate data types (i.e.,\n string, numeric, date, or time objects). Fields that might break the file on\n modification (i.e., `version`, `bytes_in_header_record`, `reserved`,\n `num_data_records`, `data_record_duration`, and `num_signals`) can not be set after\n instantiation.\n\n Note that the startdate has to be set via the parameter `recording`.\n\n For writing an EDF file with a non-integer seconds duration, currently an\n appropriate value for `data_record_duration` has to be provided manually.\n\n Parameters\n ----------\n signals : Sequence[EdfSignal]\n The (non-annotation) signals to be contained in the EDF file.\n patient : Patient | None, default: None\n The \"local patient identification\", containing patient code, sex, birthdate,\n name, and optional additional fields. If `None`, the field is set to `X X X X`\n in accordance with EDF+ specs.\n recording : Recording | None, default: None\n The \"local recording identification\", containing recording startdate, hospital\n administration code, investigator/technical code, equipment code, and optional\n additional fields. If `None`, the field is set to `Startdate X X X X` in\n accordance with EDF+ specs.\n starttime : datetime.time | None, default: None\n The starttime of the recording. If `None`, `00.00.00` is used. If `starttime`\n contains microseconds, an EDF+C file is created.\n data_record_duration : float | None, default: None\n The duration of each data record in seconds. If `None`, an appropriate value is\n chosen automatically.\n annotations : Iterable[EdfAnnotation] | None, default: None\n The annotations, consisting of onset, duration (optional), and text. 
If not\n `None`, an EDF+C file is created.\n \"\"\"\n\n version = RawHeaderFieldInt(8)\n \"\"\"EDF version, always `0`\"\"\"\n local_patient_identification = RawHeaderFieldStr(80, is_settable=True)\n \"\"\"\n Unparsed string representation of the legacy local patient identification.\n\n See also\n --------\n patient: Parsed representation, as a :class:`Patient` object.\n \"\"\"\n local_recording_identification = RawHeaderFieldStr(80, is_settable=True)\n \"\"\"\n Unparsed string representation of the legacy local recording identification.\n\n See also\n --------\n recording: Parsed representation, as a :class:`Recording` object.\n \"\"\"\n _startdate = RawHeaderFieldDate(8, is_settable=True)\n _starttime = RawHeaderFieldTime(8, is_settable=True)\n bytes_in_header_record = RawHeaderFieldInt(8)\n \"\"\"Number of bytes in the header record.\"\"\"\n reserved = RawHeaderFieldStr(44)\n \"\"\"`\"EDF+C\"` for an EDF+C file, else `\"\"`.\"\"\"\n num_data_records = RawHeaderFieldInt(8)\n \"\"\"Number of data records in the recording.\"\"\"\n _data_record_duration = RawHeaderFieldFloat(8, is_settable=True)\n _num_signals = RawHeaderFieldInt(4, is_settable=True)\n\n def __init__(\n self,\n signals: Sequence[EdfSignal],\n *,\n patient: Patient | None = None,\n recording: Recording | None = None,\n starttime: datetime.time | None = None,\n data_record_duration: float | None = None,\n annotations: Iterable[EdfAnnotation] | None = None,\n ):\n if not signals and not annotations:\n raise ValueError(\"Edf must contain either signals or annotations\")\n if patient is None:\n patient = Patient()\n if recording is None:\n recording = Recording()\n if starttime is None:\n starttime = datetime.time(0, 0, 0)\n if data_record_duration is None:\n data_record_duration = _calculate_data_record_duration(signals)\n elif len(signals) == 0 and data_record_duration != 0:\n raise ValueError(\n \"Data record duration must be zero for annotation-only files\"\n )\n\n self._data_record_duration = data_record_duration\n self._set_num_data_records_with_signals(signals)\n self._version = Edf.version.encode(0)\n self.local_patient_identification = patient._to_str()\n self.local_recording_identification = recording._to_str()\n self._set_startdate_with_recording(recording)\n self._starttime = starttime.replace(microsecond=0)\n self._reserved = Edf.reserved.encode(\"\")\n if starttime.microsecond and annotations is None:\n warnings.warn(\"Creating EDF+C to store microsecond starttime.\")\n if annotations is not None or starttime.microsecond:\n signals = (\n *signals,\n _create_annotations_signal(\n annotations if annotations is not None else (),\n num_data_records=self.num_data_records,\n data_record_duration=self.data_record_duration,\n subsecond_offset=starttime.microsecond / 1_000_000,\n ),\n )\n self._reserved = Edf.reserved.encode(\"EDF+C\")\n self._set_signals(signals)\n\n def __repr__(self) -> str:\n signals_text = f\"{len(self.signals)} signal\"\n if len(self.signals) != 1:\n signals_text += \"s\"\n annotations_text = f\"{len(self.annotations)} annotation\"\n if len(self.annotations) != 1:\n annotations_text += \"s\"\n return f\"<Edf {signals_text} {annotations_text}>\"\n\n def _load_data(self, file: Path | io.BufferedReader | io.BytesIO) -> None:\n lens = [signal.samples_per_data_record for signal in self._signals]\n datarecord_len = sum(lens)\n if not isinstance(file, Path):\n datarecords = np.frombuffer(file.read(), dtype=np.int16)\n else:\n datarecords = np.memmap(\n file,\n dtype=np.int16,\n mode=\"r\",\n 
offset=self.bytes_in_header_record,\n )\n datarecords.shape = (self.num_data_records, datarecord_len)\n ends = np.cumsum(lens)\n starts = ends - lens\n\n for signal, start, end in zip(self._signals, starts, ends):\n signal._digital = datarecords[:, start:end].flatten()\n\n def _read_header(self, buffer: io.BufferedReader | io.BytesIO) -> None:\n for header_name, length in get_header_fields(Edf):\n setattr(self, \"_\" + header_name, buffer.read(length))\n self._signals = self._parse_signal_headers(buffer.read(256 * self._num_signals))\n\n @property\n def signals(self) -> tuple[EdfSignal, ...]:\n \"\"\"\n Ordinary signals contained in the recording.\n\n Annotation signals are excluded. Individual signals can not be removed, added,\n or replaced by modifying this property. Use :meth:`Edf.append_signals`,\n :meth:`Edf.drop_signals`, or :attr:`EdfSignal.data`, respectively.\n \"\"\"\n return tuple(s for s in self._signals if s.label != \"EDF Annotations\")\n\n def _set_signals(self, signals: Sequence[EdfSignal]) -> None:\n signals = tuple(signals)\n self._set_num_data_records_with_signals(signals)\n self._signals = signals\n self._bytes_in_header_record = Edf.bytes_in_header_record.encode(\n 256 * (len(signals) + 1)\n )\n self._num_signals = len(signals)\n if all(s.label == \"EDF Annotations\" for s in signals):\n self._data_record_duration = 0\n\n def _set_num_data_records_with_signals(\n self,\n signals: Sequence[EdfSignal],\n ) -> None:\n if not signals:\n num_data_records = 1\n else:\n signal_durations = [\n round(len(s._digital) / s.sampling_frequency, 12) for s in signals\n ]\n if any(v != signal_durations[0] for v in signal_durations[1:]):\n raise ValueError(\n f\"Inconsistent signal durations (in seconds): {signal_durations}\"\n )\n num_data_records = _calculate_num_data_records(\n signal_durations[0],\n self.data_record_duration,\n )\n signal_lengths = [len(s._digital) for s in signals]\n if any(l % num_data_records for l in signal_lengths):\n raise ValueError(\n f\"Not all signal lengths can be split into {num_data_records} data records: {signal_lengths}\"\n )\n self._num_data_records = Edf.num_data_records.encode(num_data_records)\n\n def _parse_signal_headers(self, raw_signal_headers: bytes) -> tuple[EdfSignal, ...]:\n raw_headers_split: dict[str, list[bytes]] = {}\n start = 0\n for header_name, length in get_header_fields(EdfSignal):\n end = start + length * self._num_signals\n raw_header = raw_signal_headers[start:end]\n raw_headers_split[header_name] = [\n raw_header[i : length + i] for i in range(0, len(raw_header), length)\n ]\n start = end\n signals = []\n for i in range(self._num_signals):\n raw_signal_header = {\n key: raw_headers_split[key][i] for key in raw_headers_split\n }\n try:\n sampling_frequency = (\n int(raw_signal_header[\"samples_per_data_record\"])\n / self.data_record_duration\n )\n except ZeroDivisionError:\n if raw_signal_header[\"_label\"].rstrip() == b\"EDF Annotations\":\n sampling_frequency = 0\n signals.append(\n EdfSignal._from_raw_header(sampling_frequency, **raw_signal_header)\n )\n return tuple(signals)\n\n def write(self, target: Path | str | io.BufferedWriter | io.BytesIO) -> None:\n \"\"\"\n Write an Edf to a file or file-like object.\n\n Parameters\n ----------\n target : Path | str | io.BufferedWriter | io.BytesIO\n The file location (path object or string) or file-like object to write to.\n \"\"\"\n if self.num_data_records == -1:\n warnings.warn(\"num_data_records=-1, determining correct value from data\")\n num_data_records = 
_calculate_num_data_records(\n len(self._signals[0]._digital) * self._signals[0].sampling_frequency,\n self.data_record_duration,\n )\n else:\n num_data_records = self.num_data_records\n for signal in self._signals:\n signal._samples_per_data_record = EdfSignal.samples_per_data_record.encode( # type: ignore[attr-defined]\n len(signal._digital) // num_data_records\n )\n header_records = []\n for header_name, _ in get_header_fields(Edf):\n header_records.append(getattr(self, \"_\" + header_name))\n for header_name, _ in get_header_fields(EdfSignal):\n for signal in self._signals:\n header_records.append(getattr(signal, \"_\" + header_name))\n header_record = b\"\".join(header_records)\n\n lens = [signal.samples_per_data_record for signal in self._signals]\n ends = np.cumsum(lens)\n starts = ends - lens\n data_record = np.empty((num_data_records, sum(lens)), dtype=np.int16)\n for signal, start, end in zip(self._signals, starts, ends):\n data_record[:, start:end] = signal._digital.reshape((-1, end - start))\n\n if isinstance(target, str):\n target = Path(target)\n if isinstance(target, io.BufferedWriter):\n target.write(header_record)\n data_record.tofile(target)\n elif isinstance(target, io.BytesIO):\n target.write(header_record)\n target.write(data_record.tobytes())\n else:\n with target.expanduser().open(\"wb\") as file:\n file.write(header_record)\n data_record.tofile(file)\n\n @property\n def labels(self) -> tuple[str, ...]:\n \"\"\"\n The labels of all signals contained in the Edf.\n\n Returns\n -------\n tuple[str, ...]\n The labels, in order of the signals.\n \"\"\"\n return tuple(s.label for s in self.signals)\n\n def get_signal(self, label: str) -> EdfSignal:\n \"\"\"\n Retrieve a single signal by its label.\n\n The label has to be unique - a ValueError is raised if it is ambiguous or does\n not exist.\n\n Parameters\n ----------\n label : str\n A label identifying a single signal\n\n Returns\n -------\n EdfSignal\n The signal corresponding to the given label.\n \"\"\"\n count = self.labels.count(label)\n if count == 0:\n raise ValueError(\n f\"No signal with label {label!r}, possible options: {self.labels}\"\n )\n if count > 1:\n indices = [i for i, l in enumerate(self.labels) if l == label]\n raise ValueError(f\"Ambiguous label {label!r} identifies indices {indices}\")\n return self.signals[self.labels.index(label)]\n\n @property\n def patient(self) -> Patient:\n \"\"\"\n Parsed object representation of the local patient identification.\n\n See :class:`Patient` for information on its attributes.\n \"\"\"\n return Patient._from_str(self.local_patient_identification)\n\n @patient.setter\n def patient(self, patient: Patient) -> None:\n self.local_patient_identification = patient._to_str()\n\n @property\n def recording(self) -> Recording:\n \"\"\"\n Parsed object representation of the local recording identification.\n\n See :class:`Recording` for information on its attributes.\n \"\"\"\n return Recording._from_str(self.local_recording_identification)\n\n @recording.setter\n def recording(self, recording: Recording) -> None:\n self._set_startdate_with_recording(recording)\n self.local_recording_identification = recording._to_str()\n\n @property\n def startdate(self) -> datetime.date:\n \"\"\"\n Recording startdate.\n\n If the :attr:`local_recording_identification` conforms to the EDF+ standard, the\n startdate provided there is used. If not, this falls back to the legacy\n :attr:`startdate` field. If both differ, a warning is issued and the EDF+ field\n is preferred. 
Raises an `AnonymizedDateError` if the EDF+ field is anonymized\n (i.e., begins with `Startdate X`).\n \"\"\"\n with contextlib.suppress(Exception):\n if self._startdate != self.recording.startdate:\n warnings.warn(\n f\"Different values in startdate fields: {self._startdate}, {self.recording.startdate}\"\n )\n try:\n return self.recording.startdate\n except AnonymizedDateError:\n raise\n except ValueError:\n return self._startdate\n\n @startdate.setter\n def startdate(self, startdate: datetime.date) -> None:\n self._startdate = startdate\n try:\n self.recording.startdate # noqa: B018\n except AnonymizedDateError:\n pass\n except Exception:\n return\n recording_subfields = self.local_recording_identification.split()\n recording_subfields[1] = encode_edfplus_date(startdate)\n self.local_recording_identification = \" \".join(recording_subfields)\n\n @property\n def _subsecond_offset(self) -> float:\n try:\n timekeeping_raw = self._timekeeping_signal._digital.tobytes()\n first_data_record = timekeeping_raw[: timekeeping_raw.find(b\"\\x00\") + 1]\n return _EdfAnnotationsDataRecord.from_bytes(first_data_record).tals[0].onset\n except StopIteration:\n return 0\n\n @property\n def starttime(self) -> datetime.time:\n \"\"\"\n Recording starttime.\n\n In EDF+ files, microsecond accuracy is supported.\n \"\"\"\n subsecond_offset = self._subsecond_offset\n try:\n return self._starttime.replace(\n microsecond=round(subsecond_offset * 1000000)\n )\n except ValueError as e:\n raise ValueError(\n f\"Subsecond offset in first annotation must be 0.X, is {subsecond_offset}\"\n ) from e\n\n @starttime.setter\n def starttime(self, starttime: datetime.time) -> None:\n onset_change = starttime.microsecond / 1000000 - self._subsecond_offset\n self._starttime = starttime.replace(microsecond=0)\n if starttime.microsecond != self.starttime.microsecond:\n timekeeping_signal = self._timekeeping_signal\n data_records = []\n for data_record in timekeeping_signal._digital.reshape(\n (-1, timekeeping_signal.samples_per_data_record)\n ):\n annot_dr = _EdfAnnotationsDataRecord.from_bytes(data_record.tobytes())\n for tal in annot_dr.tals:\n tal.onset = round(tal.onset + onset_change, 12)\n data_records.append(annot_dr.to_bytes())\n maxlen = max(len(data_record) for data_record in data_records)\n if maxlen % 2:\n maxlen += 1\n raw = b\"\".join(dr.ljust(maxlen, b\"\\x00\") for dr in data_records)\n timekeeping_signal._samples_per_data_record = EdfSignal.samples_per_data_record.encode( # type: ignore[attr-defined]\n maxlen // 2\n )\n timekeeping_signal._sampling_frequency = (\n maxlen // 2 * self.data_record_duration\n )\n timekeeping_signal._digital = np.frombuffer(raw, dtype=np.int16)\n\n def _set_startdate_with_recording(self, recording: Recording) -> None:\n try:\n self._startdate = recording.startdate\n except AnonymizedDateError:\n self._startdate = datetime.date(1985, 1, 1)\n\n @property\n def data_record_duration(self) -> float:\n \"\"\"Duration of each data record in seconds.\"\"\"\n return self._data_record_duration\n\n def update_data_record_duration(\n self,\n data_record_duration: float,\n method: Literal[\"strict\", \"pad\", \"truncate\"] = \"strict\",\n ) -> None:\n \"\"\"\n Update the data record duration.\n\n This operation will fail if the new duration is incompatible with the current\n sampling frequencies.\n\n Parameters\n ----------\n data_record_duration : float\n The new data record duration in seconds.\n method : `{\"strict\", \"pad\", \"truncate\"}`, default: `\"strict\"`\n How to handle the case where 
the new duration does not divide the Edf\n duration evenly\n\n - \"strict\": Raise a ValueError\n - \"pad\": Pad the data with zeros to the next compatible duration. If zero\n is outside the physical range, data is padded with the physical minimum.\n - \"truncate\": Truncate the data to the previous compatible duration (might\n lead to loss of data)\n \"\"\"\n if data_record_duration == self.data_record_duration:\n return\n if data_record_duration <= 0:\n raise ValueError(\n f\"Data record duration must be positive, got {data_record_duration}\"\n )\n if not self.signals:\n raise ValueError(\n \"Data record duration must be zero for annotation-only files\"\n )\n for signal in self.signals:\n spr = signal.sampling_frequency * data_record_duration\n if spr % 1:\n raise ValueError(\n f\"Cannot set data record duration to {data_record_duration}: Incompatible sampling frequency {signal.sampling_frequency} Hz\"\n )\n\n num_data_records = self._pad_or_truncate_signals(data_record_duration, method)\n self._update_record_duration_in_annotation_signals(\n data_record_duration, num_data_records\n )\n self._data_record_duration = data_record_duration\n self._num_data_records = Edf.num_data_records.encode(num_data_records)\n\n @property\n def num_signals(self) -> int:\n \"\"\"Return the number of signals, excluding annotation signals for EDF+.\"\"\"\n return len(self.signals)\n\n def _pad_or_truncate_signals(\n self, data_record_duration: float, method: Literal[\"strict\", \"pad\", \"truncate\"]\n ) -> int:\n if method == \"pad\":\n new_duration = (\n ceil(self.duration / data_record_duration) * data_record_duration\n )\n self._pad_or_truncate_data(new_duration)\n return round(new_duration / data_record_duration)\n if method == \"truncate\":\n new_duration = (\n floor(self.duration / data_record_duration) * data_record_duration\n )\n self._pad_or_truncate_data(new_duration)\n return round(new_duration / data_record_duration)\n return _calculate_num_data_records(self.duration, data_record_duration)\n\n def _update_record_duration_in_annotation_signals(\n self, data_record_duration: float, num_data_records: int\n ) -> None:\n signals = list(self._signals)\n for idx, signal in enumerate(self._signals):\n if signal not in self._annotation_signals:\n continue\n annotations = []\n for data_record in signal._digital.reshape(\n (-1, signal.samples_per_data_record)\n ):\n annot_dr = _EdfAnnotationsDataRecord.from_bytes(data_record.tobytes())\n if signal is self._timekeeping_signal:\n annotations.extend(annot_dr.annotations[1:])\n else:\n annotations.extend(annot_dr.annotations)\n signals[idx] = _create_annotations_signal(\n [\n EdfAnnotation(a.onset - self._subsecond_offset, a.duration, a.text)\n for a in annotations\n ],\n num_data_records=num_data_records,\n data_record_duration=data_record_duration,\n with_timestamps=signal is self._timekeeping_signal,\n subsecond_offset=self._subsecond_offset,\n )\n self._signals = tuple(signals)\n\n def _pad_or_truncate_data(self, new_duration: float) -> None:\n for signal in self.signals:\n n_samples = round(new_duration * signal.sampling_frequency)\n diff = n_samples - len(signal._digital)\n if diff > 0:\n physical_pad_value = 0.0\n if signal.physical_min > 0 or signal.physical_max < 0:\n physical_pad_value = signal.physical_min\n signal._set_data(\n np.pad(signal.data, (0, diff), constant_values=physical_pad_value)\n )\n elif diff < 0:\n signal._set_data(signal.data[:diff])\n\n def anonymize(self) -> None:\n \"\"\"\n Anonymize a recording.\n\n Header fields are modified 
as follows:\n - local patient identification is set to `X X X X`\n - local recording identification is set to `Startdate X X X X`\n - startdate is set to `01.01.85`\n - starttime is set to `00.00.00`\n\n For EDF+ files, subsecond starttimes specified via an annotations signal are\n removed.\n \"\"\"\n self.patient = Patient()\n self.recording = Recording()\n self.starttime = datetime.time(0, 0, 0)\n\n def drop_signals(self, drop: Iterable[int | str]) -> None:\n \"\"\"\n Drop signals by index or label.\n\n Signal indices (int) and labels (str) can be provided in the same iterable. For\n ambiguous labels, all corresponding signals are dropped. Raises a ValueError if\n at least one of the provided identifiers does not correspond to a signal.\n\n Parameters\n ----------\n drop : Iterable[int | str]\n The signals to drop, identified by index or label.\n \"\"\"\n if isinstance(drop, str):\n drop = [drop]\n selected: list[EdfSignal] = []\n dropped: list[int | str] = []\n i = 0\n for signal in self._signals:\n if signal.label == \"EDF Annotations\":\n selected.append(signal)\n continue\n if i in drop or signal.label in drop:\n dropped.append(i)\n dropped.append(signal.label)\n else:\n selected.append(signal)\n i += 1\n if not_dropped := set(drop) - set(dropped):\n raise ValueError(f\"No signal found with index/label {not_dropped}\")\n self._signals = tuple(selected)\n self._bytes_in_header_record = Edf.bytes_in_header_record.encode(\n 256 * (len(selected) + 1)\n )\n self._num_signals = len(selected)\n\n def append_signals(self, new_signals: EdfSignal | Iterable[EdfSignal]) -> None:\n \"\"\"\n Append one or more signal(s) to the Edf recording.\n\n Every signal must be compatible with the current `data_record_duration` and all\n signal durations must match the overall recording duration. For recordings\n containing EDF+ annotation signals, the new signals are inserted after the last\n ordinary (i.e. 
non-annotation) signal.\n\n Parameters\n ----------\n new_signals : EdfSignal | Iterable[EdfSignal]\n The signal(s) to add.\n \"\"\"\n if isinstance(new_signals, EdfSignal):\n new_signals = [new_signals]\n last_ordinary_index = 0\n for i, signal in enumerate(self._signals):\n if signal.label != \"EDF Annotations\":\n last_ordinary_index = i\n self._set_signals(\n [\n *self._signals[: last_ordinary_index + 1],\n *new_signals,\n *self._signals[last_ordinary_index + 1 :],\n ]\n )\n\n @property\n def _annotation_signals(self) -> Iterable[EdfSignal]:\n return (signal for signal in self._signals if signal.label == \"EDF Annotations\")\n\n @property\n def _timekeeping_signal(self) -> EdfSignal:\n return next(iter(self._annotation_signals))\n\n @property\n def duration(self) -> float:\n \"\"\"Recording duration in seconds.\"\"\"\n return self.num_data_records * self.data_record_duration\n\n @property\n def annotations(self) -> tuple[EdfAnnotation, ...]:\n \"\"\"\n All annotations contained in the Edf, sorted chronologically.\n\n Does not include timekeeping annotations.\n \"\"\"\n annotations: list[EdfAnnotation] = []\n for i, signal in enumerate(self._annotation_signals):\n for data_record in signal._digital.reshape(\n (-1, signal.samples_per_data_record)\n ):\n annot_dr = _EdfAnnotationsDataRecord.from_bytes(data_record.tobytes())\n if i == 0:\n # from https://www.edfplus.info/specs/edfplus.html#timekeeping:\n # The first annotation of the first 'EDF Annotations' signal in each\n # data record is empty, but its timestamp specifies how many seconds\n # after the file startdate/time that data record starts.\n annotations.extend(annot_dr.annotations[1:])\n else:\n annotations.extend(annot_dr.annotations)\n subsecond_offset = self._subsecond_offset\n annotations = [\n EdfAnnotation(\n round(ann.onset - subsecond_offset, 12), ann.duration, ann.text\n )\n for ann in annotations\n ]\n return tuple(sorted(annotations))\n\n def drop_annotations(self, text: str) -> None:\n \"\"\"\n Drop annotations with a given text.\n\n Parameters\n ----------\n text : str\n All annotations whose text exactly matches this parameter are removed.\n \"\"\"\n for signal in self._annotation_signals:\n for data_record in signal._digital.reshape(\n (-1, signal.samples_per_data_record)\n ):\n annotations = _EdfAnnotationsDataRecord.from_bytes(\n data_record.tobytes()\n )\n annotations.drop_annotations_with_text(text)\n data_record[:] = np.frombuffer(\n annotations.to_bytes().ljust(len(data_record) * 2, b\"\\x00\"),\n dtype=np.int16,\n )\n\n def to_bytes(self) -> bytes:\n \"\"\"\n Convert an Edf to a `bytes` object.\n\n Returns\n -------\n bytes\n The binary representation of the Edf object (i.e., what a file created with\n `Edf.write` would contain).\n \"\"\"\n stream = io.BytesIO()\n self.write(stream)\n stream.seek(0)\n return stream.read()\n\n def slice_between_seconds(\n self,\n start: float,\n stop: float,\n *,\n keep_all_annotations: bool = False,\n ) -> None:\n \"\"\"\n Slice to the interval between two times.\n\n The sample point corresponding to `stop` is excluded. 
`start` and `stop` are\n given in seconds from recording start and have to correspond exactly to a sample\n time in all non-annotation signals.\n\n Parameters\n ----------\n start : float\n Start time in seconds from recording start.\n stop : float\n Stop time in seconds from recording start.\n keep_all_annotations : bool, default: False\n If set to `True`, annotations outside the selected time interval are kept.\n \"\"\"\n signals: list[EdfSignal] = []\n self._verify_seconds_inside_recording_time(start)\n self._verify_seconds_inside_recording_time(stop)\n self._verify_seconds_coincide_with_sample_time(start)\n self._verify_seconds_coincide_with_sample_time(stop)\n self._num_data_records = Edf.num_data_records.encode(\n int((stop - start) / self.data_record_duration)\n )\n for signal in self._signals:\n if signal.label == \"EDF Annotations\":\n signals.append(\n self._slice_annotations_signal(\n signal,\n start=start,\n stop=stop,\n keep_all_annotations=keep_all_annotations,\n )\n )\n else:\n start_index = start * signal.sampling_frequency\n stop_index = stop * signal.sampling_frequency\n signal._digital = signal._digital[int(start_index) : int(stop_index)]\n signals.append(signal)\n self._set_signals(signals)\n self._shift_startdatetime(int(start))\n\n def slice_between_annotations(\n self,\n start_text: str,\n stop_text: str,\n *,\n keep_all_annotations: bool = False,\n ) -> None:\n \"\"\"\n Slice to the interval between two EDF+ annotations.\n\n The sample point corresponding to the onset of the annotation identified by\n `stop_text` is excluded. `start_text` and `stop_text` each have to uniquely\n identify a single annotation, whose onset corresponds exactly to a sample time\n in all non-annotation signals.\n\n Parameters\n ----------\n start_text : str\n Text identifying the start annotation.\n stop_text : str\n Text identifying the stop annotation.\n keep_all_annotations : bool, default: False\n If set to `True`, annotations outside the selected time interval are kept.\n \"\"\"\n self.slice_between_seconds(\n self._get_annotation_by_text(start_text).onset,\n self._get_annotation_by_text(stop_text).onset,\n keep_all_annotations=keep_all_annotations,\n )\n\n def _get_annotation_by_text(self, text: str) -> EdfAnnotation:\n matches = []\n for annotation in self.annotations:\n if annotation.text == text:\n matches.append(annotation)\n if len(matches) == 1:\n return matches[0]\n if len(matches) > 1:\n raise ValueError(\n f\"Ambiguous annotation text {text!r}, found {len(matches)} matches\"\n )\n raise ValueError(f\"No annotation found with text {text!r}\")\n\n def _verify_seconds_inside_recording_time(self, seconds: float) -> None:\n if not 0 <= seconds <= self.duration:\n raise ValueError(\n f\"{seconds} is an invalid slice time for recording duration {self.duration}\"\n )\n\n def _verify_seconds_coincide_with_sample_time(self, seconds: float) -> None:\n for i, signal in enumerate(self.signals):\n index = seconds * signal.sampling_frequency\n if index != int(index):\n raise ValueError(\n f\"{seconds}s is not a sample time of signal {i} ({signal.label}) with fs={signal.sampling_frequency}Hz\"\n )\n\n def _shift_startdatetime(self, seconds: float) -> None:\n timedelta = datetime.timedelta(seconds=seconds)\n try:\n startdate = self.startdate\n startdate_anonymized = False\n except AnonymizedDateError:\n startdate = datetime.date.fromtimestamp(0)\n startdate_anonymized = True\n startdatetime = datetime.datetime.combine(startdate, self.starttime)\n startdatetime += timedelta\n if not 
startdate_anonymized:\n self.startdate = startdatetime.date()\n self.starttime = startdatetime.time()\n\n def copy(self) -> Edf:\n \"\"\"\n Create a deep copy of the Edf.\n\n Returns\n -------\n Edf\n The copied Edf object.\n \"\"\"\n return copy.deepcopy(self)\n\n def _slice_annotations_signal(\n self,\n signal: EdfSignal,\n *,\n start: float,\n stop: float,\n keep_all_annotations: bool,\n ) -> EdfSignal:\n is_timekeeping_signal = signal == self._timekeeping_signal\n annotations: list[EdfAnnotation] = []\n for data_record in signal._digital.reshape(\n (-1, signal.samples_per_data_record)\n ):\n annot_dr = _EdfAnnotationsDataRecord.from_bytes(data_record.tobytes())\n if is_timekeeping_signal:\n annotations.extend(annot_dr.annotations[1:])\n else:\n annotations.extend(annot_dr.annotations)\n annotations = [\n EdfAnnotation(round(a.onset - start, 12), a.duration, a.text)\n for a in annotations\n if keep_all_annotations or start <= a.onset < stop\n ]\n return _create_annotations_signal(\n annotations,\n num_data_records=self.num_data_records,\n data_record_duration=self.data_record_duration,\n with_timestamps=is_timekeeping_signal,\n subsecond_offset=self._subsecond_offset + start - int(start),\n )" }, { "identifier": "EdfSignal", "path": "edfio/edf.py", "snippet": "class EdfSignal:\n \"\"\"A single EDF signal.\n\n Attributes that might break the signal or file on modification (i.e.,\n `sampling_frequency`, `physical_range`, `digital_range`, `samples_per_data_record`,\n and `reserved`) can not be set after instantiation.\n\n To reduce memory consumption, signal data is always stored as a 16-bit integer array\n containing the digital values that would be written to the corresponding EDF file.\n Therefore, it is expected that `EdfSignal.data` does not match the physical\n values passed during instantiation exactly.\n\n Parameters\n ----------\n data : npt.NDArray[np.float64]\n The signal data (physical values).\n sampling_frequency : float\n The sampling frequency in Hz.\n label : str, default: `\"\"`\n The signal's label, e.g., `\"EEG Fpz-Cz\"` or `\"Body temp\"`.\n transducer_type : str, default: `\"\"`\n The transducer type, e.g., `\"AgAgCl electrode\"`.\n physical_dimension : str, default: `\"\"`\n The physical dimension, e.g., `\"uV\"` or `\"degreeC\"`\n physical_range : tuple[float, float] | None, default: None\n The physical range given as a tuple of `(physical_min, physical_max)`. If\n `None`, this is determined from the data.\n digital_range : tuple[int, int], default: `(-32768, 32767)`\n The digital range given as a tuple of `(digital_min, digital_max)`. 
Uses the\n maximum resolution of 16-bit integers by default.\n prefiltering : str, default: `\"\"`\n The signal prefiltering, e.g., `\"HP:0.1Hz LP:75Hz\"`.\n \"\"\"\n\n _label = RawHeaderFieldStr(16, is_settable=True)\n transducer_type = RawHeaderFieldStr(80, is_settable=True)\n \"\"\"Transducer type, e.g., `\"AgAgCl electrode\"`.\"\"\"\n physical_dimension = RawHeaderFieldStr(8, is_settable=True)\n \"\"\"Physical dimension, e.g., `\"uV\"` or `\"degreeC\"`.\"\"\"\n physical_min = RawHeaderFieldFloat(8)\n \"\"\"Physical minimum, e.g., `-500` or `34`.\"\"\"\n physical_max = RawHeaderFieldFloat(8)\n \"\"\"Physical maximum, e.g., `500` or `40`.\"\"\"\n digital_min = RawHeaderFieldInt(8)\n \"\"\"Digital minimum, e.g., `-2048`.\"\"\"\n digital_max = RawHeaderFieldInt(8)\n \"\"\"Digital maximum, e.g., `2047`.\"\"\"\n prefiltering = RawHeaderFieldStr(80, is_settable=True)\n \"\"\"Signal prefiltering, e.g., `\"HP:0.1Hz LP:75Hz\"`.\"\"\"\n samples_per_data_record = RawHeaderFieldInt(8)\n \"\"\"\n Number of samples in each data record.\n\n For newly instantiated :class:`EdfSignal` objects, this is only set once\n :meth:`Edf.write` is called.\n \"\"\"\n reserved = RawHeaderFieldStr(32)\n \"\"\"Reserved signal header field, always `\"\"`\"\"\"\n\n def __init__(\n self,\n data: npt.NDArray[np.float64],\n sampling_frequency: float,\n *,\n label: str = \"\",\n transducer_type: str = \"\",\n physical_dimension: str = \"\",\n physical_range: tuple[float, float] | None = None,\n digital_range: tuple[int, int] = (-32768, 32767),\n prefiltering: str = \"\",\n ):\n self._sampling_frequency = sampling_frequency\n self.label = label\n self.transducer_type = transducer_type\n self.physical_dimension = physical_dimension\n self.prefiltering = prefiltering\n self._reserved = EdfSignal.reserved.encode(\"\")\n if not np.all(np.isfinite(data)):\n raise ValueError(\"Signal data must contain only finite values\")\n self._set_physical_range(physical_range, data)\n self._set_digital_range(digital_range)\n self._set_data(data)\n\n def __repr__(self) -> str:\n info = f\"{self.sampling_frequency:g}Hz\"\n if self.label:\n info = f\"{self.label} \" + info\n return f\"<EdfSignal {info}>\"\n\n @classmethod\n def _from_raw_header(\n cls,\n sampling_frequency: float,\n *,\n _label: bytes,\n transducer_type: bytes,\n physical_dimension: bytes,\n physical_min: bytes,\n physical_max: bytes,\n digital_min: bytes,\n digital_max: bytes,\n prefiltering: bytes,\n samples_per_data_record: bytes,\n reserved: bytes,\n ) -> EdfSignal:\n sig = object.__new__(cls)\n sig._sampling_frequency = sampling_frequency\n sig._label = EdfSignal._label.decode(_label) # type: ignore[attr-defined]\n sig._transducer_type = transducer_type # type: ignore[attr-defined]\n sig._physical_dimension = physical_dimension # type: ignore[attr-defined]\n sig._physical_min = physical_min # type: ignore[attr-defined]\n sig._physical_max = physical_max # type: ignore[attr-defined]\n sig._digital_min = digital_min # type: ignore[attr-defined]\n sig._digital_max = digital_max # type: ignore[attr-defined]\n sig._prefiltering = prefiltering # type: ignore[attr-defined]\n sig._samples_per_data_record = samples_per_data_record # type: ignore[attr-defined]\n sig._reserved = reserved # type: ignore[attr-defined]\n return sig\n\n @classmethod\n def from_hypnogram(\n cls,\n stages: npt.NDArray[np.float64],\n stage_duration: float = 30,\n *,\n label: str = \"\",\n ) -> EdfSignal:\n \"\"\"Create an EDF signal from a hypnogram, with scaling according to EDF specs.\n\n According to the 
EDF FAQ [1]_, use integer numbers 0, 1, 2, 3, 4, 5, 6, and 9\n for sleep stages W, 1, 2, 3, 4, R, MT, und unscored, respectively. The digital\n range is set to `(0, 9)`.\n\n Parameters\n ----------\n stages : npt.NDArray[np.float64]\n The sleep stages, coded as integer numbers.\n stage_duration : float, default: `30`\n The duration of each sleep stage in seconds, used to set the sampling\n frequency to its inverse.\n label : str, default: `\"\"`\n The signal's label.\n\n Returns\n -------\n EdfSignal\n The resulting :class:`EdfSignal` object.\n\n References\n ----------\n .. [1] EDF FAQ, https://www.edfplus.info/specs/edffaq.html\n \"\"\"\n allowed_stages = {0, 1, 2, 3, 4, 5, 6, 9}\n if invalid_stages := set(stages) - allowed_stages:\n raise ValueError(f\"stages contains invalid values: {invalid_stages}\")\n return EdfSignal(\n data=stages,\n sampling_frequency=1 / stage_duration,\n label=label,\n physical_range=(0, 9),\n digital_range=(0, 9),\n )\n\n @property\n def label(self) -> str:\n \"\"\"Signal label, e.g., `\"EEG Fpz-Cz\"` or `\"Body temp\"`.\"\"\"\n return self._label\n\n @label.setter\n def label(self, label: str) -> None:\n if label == \"EDF Annotations\":\n raise ValueError(\"Ordinary signal label must not be 'EDF Annotations'.\")\n self._label = label\n\n @property\n def physical_range(self) -> FloatRange:\n \"\"\"The physical range as a tuple of `(physical_min, physical_max)`.\"\"\"\n return FloatRange(self.physical_min, self.physical_max)\n\n @property\n def digital_range(self) -> IntRange:\n \"\"\"The digital range as a tuple of `(digital_min, digital_max)`.\"\"\"\n return IntRange(self.digital_min, self.digital_max)\n\n @property\n def sampling_frequency(self) -> float:\n \"\"\"The sampling frequency in Hz.\"\"\"\n return self._sampling_frequency\n\n @property\n def data(self) -> npt.NDArray[np.float64]:\n \"\"\"\n Numpy array containing the physical signal values as floats.\n\n To simplify avoiding inconsistencies between signal data and header fields,\n individual values in the returned array can not be modified. Use\n :meth:`EdfSignal.update_data` to overwrite with new physical data.\n \"\"\"\n try:\n gain, offset = calculate_gain_and_offset(\n self.digital_min,\n self.digital_max,\n self.physical_min,\n self.physical_max,\n )\n except ZeroDivisionError:\n data = self._digital.astype(np.float64)\n warnings.warn(\n f\"Digital minimum equals digital maximum ({self.digital_min}) for {self.label}, returning uncalibrated signal.\"\n )\n except ValueError:\n data = self._digital.astype(np.float64)\n else:\n data = (self._digital + offset) * gain\n data.setflags(write=False)\n return data\n\n def update_data(\n self,\n data: npt.NDArray[np.float64],\n *,\n keep_physical_range: bool = False,\n sampling_frequency: float | None = None,\n ) -> None:\n \"\"\"\n Overwrite physical signal values with an array of equal length.\n\n Parameters\n ----------\n data : npt.NDArray[np.float64]\n The new physical data.\n keep_physical_range : bool, default: False\n If `True`, the `physical_range` is not modified to accomodate the new data.\n sampling_frequency : float | None, default: None\n If not `None`, the `sampling_frequency` is updated to the new value. 
The new\n data must match the expected length for the new sampling frequency.\n \"\"\"\n expected_length = len(self._digital)\n if (\n sampling_frequency is not None\n and sampling_frequency != self._sampling_frequency\n ):\n expected_length = self._get_expected_new_length(sampling_frequency)\n if len(data) != expected_length:\n raise ValueError(\n f\"Signal lengths must match: got {len(data)}, expected {len(self._digital)}.\"\n )\n physical_range = self.physical_range if keep_physical_range else None\n self._set_physical_range(physical_range, data)\n if sampling_frequency is not None:\n self._sampling_frequency = sampling_frequency\n self._set_data(data)\n\n def _get_expected_new_length(self, sampling_frequency: float) -> int:\n if sampling_frequency <= 0:\n raise ValueError(\n f\"Sampling frequency must be positive, got {sampling_frequency}\"\n )\n current_length = len(self._digital)\n expected_length_f = (\n sampling_frequency / self._sampling_frequency * current_length\n )\n if not math.isclose(expected_length_f, round(expected_length_f), rel_tol=1e-10):\n raise ValueError(\n f\"Sampling frequency of {sampling_frequency} results in non-integer number of samples ({expected_length_f})\"\n )\n return round(expected_length_f)\n\n def _set_digital_range(self, digital_range: tuple[int, int]) -> None:\n digital_range = IntRange(*digital_range)\n if digital_range.min == digital_range.max:\n raise ValueError(\n f\"Digital minimum ({digital_range.min}) must differ from digital maximum ({digital_range.max}).\"\n )\n self._digital_min = EdfSignal.digital_min.encode(digital_range.min)\n self._digital_max = EdfSignal.digital_max.encode(digital_range.max)\n\n def _set_physical_range(\n self,\n physical_range: tuple[float, float] | None,\n data: npt.NDArray[np.float64],\n ) -> None:\n if physical_range is None:\n physical_range = FloatRange(data.min(), data.max())\n if physical_range.min == physical_range.max:\n physical_range = FloatRange(physical_range.min, physical_range.max + 1)\n else:\n physical_range = FloatRange(*physical_range)\n if physical_range.min == physical_range.max:\n raise ValueError(\n f\"Physical minimum ({physical_range.min}) must differ from physical maximum ({physical_range.max}).\"\n )\n data_min = data.min()\n data_max = data.max()\n if data_min < physical_range.min or data_max > physical_range.max:\n raise ValueError(\n f\"Signal range [{data_min}, {data_max}] out of physical range: [{physical_range.min}, {physical_range.max}]\"\n )\n self._physical_min = EdfSignal.physical_min.encode(\n round_float_to_8_characters(physical_range.min, math.floor)\n )\n self._physical_max = EdfSignal.physical_max.encode(\n round_float_to_8_characters(physical_range.max, math.ceil)\n )\n\n def _set_data(self, data: npt.NDArray[np.float64]) -> None:\n gain, offset = calculate_gain_and_offset(\n self.digital_min,\n self.digital_max,\n self.physical_min,\n self.physical_max,\n )\n self._digital = np.round(data / gain - offset).astype(np.int16)" }, { "identifier": "Patient", "path": "edfio/edf.py", "snippet": "class Patient:\n \"\"\"\n Object representation of the local patient identification.\n\n Parsing from/to the string containing the local_patient_identification header field\n is done according to the EDF+ specs. 
Subfields must be ASCII (32..126) and may not\n contain spaces.\n\n Parameters\n ----------\n code : str, default: `\"X\"`\n The code by which the patient is known in the hospital administration.\n sex : `{\"X\", \"F\", \"M\"}`, default: `\"X\"`\n Sex, `F` for female, `M` for male, `X` if anonymized.\n birthdate : datetime.date | None, default: None\n Patient birthdate, stored as `X` if `None`.\n name : str, default: `\"X\"`\n The patient's name, stored as `X` if `None`.\n additional : Sequence[str], default: `()`\n Optional additional subfields. Will be stored in the header field separated by\n spaces.\n \"\"\"\n\n def __init__(\n self,\n *,\n code: str = \"X\",\n sex: Literal[\"F\", \"M\", \"X\"] = \"X\",\n birthdate: datetime.date | None = None,\n name: str = \"X\",\n additional: Sequence[str] = (),\n ) -> None:\n if sex not in (\"F\", \"M\", \"X\"):\n raise ValueError(f\"Invalid sex: {sex}, must be one of F, M, X\")\n if birthdate is None:\n birthdate_field = \"X\"\n else:\n birthdate_field = encode_edfplus_date(birthdate)\n subfields = {\n \"code\": code,\n \"sex\": sex,\n \"birthdate\": birthdate_field,\n \"name\": name,\n **{f\"additional[{i}]\": v for i, v in enumerate(additional)},\n }\n validate_subfields(subfields)\n local_patient_identification = \" \".join(subfields.values())\n encode_str(local_patient_identification, 80)\n self._local_patient_identification = local_patient_identification\n\n def __repr__(self) -> str:\n try:\n return repr_from_init(self)\n except Exception:\n return repr(self._local_patient_identification)\n\n @classmethod\n def _from_str(cls, string: str) -> Patient:\n encode_str(string, 80)\n obj = object.__new__(cls)\n obj._local_patient_identification = string\n return obj\n\n def _to_str(self) -> str:\n return self._local_patient_identification\n\n @property\n def code(self) -> str:\n \"\"\"The code by which the patient is known in the hospital administration.\"\"\"\n return self.get_subfield(0)\n\n @property\n def sex(self) -> str:\n \"\"\"Sex, `F` for female, `M` for male, `X` if anonymized.\"\"\"\n return self.get_subfield(1)\n\n @property\n def birthdate(self) -> datetime.date:\n \"\"\"Patient birthdate.\"\"\"\n birthdate_field = self.get_subfield(2)\n if birthdate_field == \"X\":\n raise AnonymizedDateError(\"Patient birthdate is not available ('X').\")\n return decode_edfplus_date(birthdate_field)\n\n @property\n def name(self) -> str:\n \"\"\"The patient's name.\"\"\"\n return self.get_subfield(3)\n\n @property\n def additional(self) -> tuple[str, ...]:\n \"\"\"Optional additional subfields.\"\"\"\n return tuple(self._local_patient_identification.split()[4:])\n\n def get_subfield(self, idx: int) -> str:\n \"\"\"\n Access a subfield of the local patient identification field by index.\n\n Parameters\n ----------\n idx : int\n The index of the subfield to access.\n\n Returns\n -------\n str\n The subfield at the specified index. If the index exceeds the actually\n available number of subfields, the return value is `\"X\"`.\n \"\"\"\n subfields = self._local_patient_identification.split()\n if len(subfields) <= idx:\n return \"X\"\n return subfields[idx]" }, { "identifier": "Recording", "path": "edfio/edf.py", "snippet": "class Recording:\n \"\"\"\n Object representation of the local recording identification.\n\n Parsing from/to the string containing the local_recording_identification header\n field is done according to the EDF+ specs. 
Subfields must be ASCII (32..126) and may\n not contain spaces.\n\n Parameters\n ----------\n startdate : datetime.date | None, default: None\n The recording startdate.\n hospital_administration_code : str, default: `\"X\"`\n The hospital administration code of the investigation, e.g., EEG number or PSG\n number.\n investigator_technician_code : str, default: `\"X\"`\n A code specifying the responsible investigator or technician.\n equipment_code : str, default: `\"X\"`\n A code specifying the used equipment.\n additional : Sequence[str], default: `()`\n Optional additional subfields. Will be stored in the header field separated by\n spaces.\n \"\"\"\n\n def __init__(\n self,\n *,\n startdate: datetime.date | None = None,\n hospital_administration_code: str = \"X\",\n investigator_technician_code: str = \"X\",\n equipment_code: str = \"X\",\n additional: Sequence[str] = (),\n ) -> None:\n if startdate is None:\n startdate_field = \"X\"\n else:\n startdate_field = encode_edfplus_date(startdate)\n subfields = {\n \"startdate\": startdate_field,\n \"hospital_administration_code\": hospital_administration_code,\n \"investigator_technician_code\": investigator_technician_code,\n \"equipment_code\": equipment_code,\n **{f\"additional[{i}]\": v for i, v in enumerate(additional)},\n }\n validate_subfields(subfields)\n local_recording_identification = \" \".join((\"Startdate\", *subfields.values()))\n encode_str(local_recording_identification, 80)\n self._local_recording_identification = local_recording_identification\n\n def __repr__(self) -> str:\n try:\n return repr_from_init(self)\n except Exception:\n return repr(self._local_recording_identification)\n\n @classmethod\n def _from_str(cls, string: str) -> Recording:\n encode_str(string, 80)\n obj = object.__new__(cls)\n obj._local_recording_identification = string\n return obj\n\n def _to_str(self) -> str:\n return self._local_recording_identification\n\n @property\n def startdate(self) -> datetime.date:\n \"\"\"The recording startdate.\"\"\"\n if not self._local_recording_identification.startswith(\"Startdate \"):\n raise ValueError(\n f\"Local recording identification field {self._local_recording_identification!r} does not follow EDF+ standard.\"\n )\n startdate_field = self.get_subfield(1)\n if startdate_field == \"X\":\n raise AnonymizedDateError(\"Recording startdate is not available ('X').\")\n return decode_edfplus_date(startdate_field)\n\n @property\n def hospital_administration_code(self) -> str:\n \"\"\"The hospital administration code of the investigation.\"\"\"\n return self.get_subfield(2)\n\n @property\n def investigator_technician_code(self) -> str:\n \"\"\"A code specifying the responsible investigator or technician.\"\"\"\n return self.get_subfield(3)\n\n @property\n def equipment_code(self) -> str:\n \"\"\"A code specifying the used equipment.\"\"\"\n return self.get_subfield(4)\n\n @property\n def additional(self) -> tuple[str, ...]:\n \"\"\"Optional additional subfields.\"\"\"\n return tuple(self._local_recording_identification.split()[5:])\n\n def get_subfield(self, idx: int) -> str:\n \"\"\"\n Access a subfield of the local recording identification field by index.\n\n Parameters\n ----------\n idx : int\n The index of the subfield to access. The first subfield (starting at\n index 0) should always be \"Startdate\" according to the EDF+ spedification.\n\n Returns\n -------\n str\n The subfield at the specified index. 
If the index exceeds the actually\n available number of subfields, the return value is `\"X\"`.\n \"\"\"\n subfields = self._local_recording_identification.split()\n if len(subfields) <= idx:\n return \"X\"\n return subfields[idx]" } ]
import datetime

import numpy as np
import pytest

from edfio import AnonymizedDateError, Edf, EdfSignal, Patient, Recording
13,638
@pytest.fixture()
def patient():
    return Patient._from_str("MCH-0234567 F 02-MAY-1951 Haagse_Harry")


@pytest.fixture()
def recording():
@pytest.fixture()
def patient():
    return Patient._from_str("MCH-0234567 F 02-MAY-1951 Haagse_Harry")


@pytest.fixture()
def recording():
return Recording._from_str(
4
2023-11-09 09:53:27+00:00
16k
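The edfio record above exposes, inside its context snippets, the keyword signatures of EdfSignal, Patient, and Recording. The following is a minimal usage sketch assuming the edfio package from this record is installed; the concrete dates, codes, label, and stage sequence are illustrative values, not taken from the record.

import datetime

import numpy as np
from edfio import EdfSignal, Patient, Recording

# Header objects, mirroring the subfields described in the Patient/Recording docstrings above.
patient = Patient(
    code="MCH-0234567",                    # hypothetical hospital code
    sex="F",
    birthdate=datetime.date(1951, 5, 2),
    name="Haagse_Harry",
)
recording = Recording(
    startdate=datetime.date(2002, 3, 2),
    hospital_administration_code="PSG-1234-2002",  # hypothetical
    investigator_technician_code="NN",             # hypothetical
    equipment_code="Telemetry03",                  # hypothetical
)

# A hypnogram-style signal: one stage value per 30 s epoch, stored with the
# (0, 9) physical and digital ranges used in the snippet above.
stages = np.array([0, 1, 2, 3, 2, 5, 9], dtype=np.float64)
hypnogram = EdfSignal(
    data=stages,
    sampling_frequency=1 / 30,
    label="Hypnogram",
    physical_range=(0, 9),
    digital_range=(0, 9),
)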
sb-ai-lab/HypEx
hypex/matcher.py
[ { "identifier": "FaissMatcher", "path": "hypex/algorithms/faiss_matcher.py", "snippet": "class FaissMatcher:\n \"\"\"A class used to match instances using Faiss library.\"\"\"\n\n def __init__(\n self,\n df: pd.DataFrame,\n outcomes: str,\n treatment: str,\n info_col: list,\n features: [list, pd.DataFrame] = None,\n group_col: str = None,\n weights: dict = None,\n sigma: float = 1.96,\n validation: bool = None,\n n_neighbors: int = 10,\n silent: bool = True,\n pbar: bool = True,\n ):\n \"\"\"Construct all the necessary attributes.\n\n Args:\n df:\n The input dataframe\n outcomes:\n The target column name\n treatment:\n The column name with treatment\n info_col:\n A list with informational column names\n features:\n A list with names of feature using to matching. Defaults to None\n group_col:\n The column for stratification. Defaults to None\n weights:\n Dict with wight of features to matching. If you would like that matching will be more for\n 1 feature and less for another one\n sigma:\n The significant level for confidence interval calculation Defaults to 1.96\n validation:\n The flag for validation of estimated ATE with default method `random_feature`\n n_neighbors:\n The number of neighbors to find for each object. Defaults to 10\n silent:\n Write logs in debug mode\n pbar:\n Display progress bar while get index\n \"\"\"\n self.n_neighbors = n_neighbors\n if group_col is None:\n self.df = df\n else:\n self.df = df.sort_values([treatment, group_col])\n self.columns_del = [outcomes]\n if info_col:\n self.info_col = info_col\n else:\n self.info_col = []\n\n if self.info_col is not None:\n self.columns_del = self.columns_del + [x for x in self.info_col if x in self.df.columns]\n self.outcomes = outcomes if type(outcomes) == list else [outcomes]\n self.treatment = treatment\n\n if features is None:\n self.columns_match = list(\n set([x for x in list(self.df.columns) if x not in self.info_col] + [self.treatment] + self.outcomes)\n )\n else:\n try:\n self.columns_match = features[\"Feature\"].tolist() + [self.treatment] + self.outcomes\n except TypeError:\n self.columns_match = features + [self.treatment] + self.outcomes\n\n self.features_quality = (\n self.df.drop(columns=[self.treatment] + self.outcomes + self.info_col)\n .select_dtypes(include=[\"int16\", \"int32\", \"int64\", \"float16\", \"float32\", \"float64\"])\n .columns\n )\n self.dict_outcome_untreated = {}\n self.dict_outcome_treated = {}\n self.group_col = group_col\n self.weights = weights\n self.treated_index = None\n self.untreated_index = None\n self.orig_treated_index = None\n self.orig_untreated_index = None\n self.results = {}\n self.ATE = None\n self.sigma = sigma\n self.quality_dict = {}\n self.rep_dict = None\n self.validation = validation\n self.silent = silent\n self.pbar = pbar\n self.tqdm = None\n self.results = pd.DataFrame()\n\n def __getstate__(self) -> dict:\n \"\"\"Prepare the object for serialization.\n\n This method is called when the object is about to be serialized.\n It removes the `tqdm` attribute from the object's dictionary\n because `tqdm` objects cannot be serialized.\n\n Returns:\n A copy of the object's dictionary with the `tqdm` attribute removed.\n \"\"\"\n state = self.__dict__.copy()\n if \"tqdm\" in state:\n del state[\"tqdm\"]\n return state\n\n def __setstate__(self, state: dict):\n \"\"\"Restore the object after deserialization.\n\n This method is called when the object is deserialized.\n It adds the `tqdm` attribute back to the object's dictionary\n if the `pbar` attribute is True.\n\n 
Args:\n state:\n The deserialized state of the object\n \"\"\"\n if \"pbar\" in state and state[\"pbar\"]:\n state[\"tqdm\"] = None\n self.__dict__.update(state)\n\n def _get_split(self, df: pd.DataFrame) -> (pd.DataFrame, pd.DataFrame):\n \"\"\"Creates split data by treatment column.\n\n Separate treatment column with 1 (treated) an 0 (untreated),\n scales and transforms treatment column\n\n Args:\n df:\n The input dataframe\n\n Returns:\n Tuple of dataframes - one for treated (df[self.treatment] == 1]) and\n one for untreated (df[self.treatment] == 0]). Drops self.outcomes and\n `self.treatment` columns\n\n \"\"\"\n logger.debug(\"Creating split data by treatment column\")\n\n treated = df[df[self.treatment] == 1].drop([self.treatment] + self.outcomes, axis=1)\n untreated = df[df[self.treatment] == 0].drop([self.treatment] + self.outcomes, axis=1)\n\n return treated, untreated\n\n def _predict_outcome(self, std_treated: pd.DataFrame, std_untreated: pd.DataFrame):\n \"\"\"Applies LinearRegression to input arrays.\n\n Calculate biases of treated and untreated values,\n creates dict of y - regular, matched and without bias.\n\n Args:\n std_treated:\n The dataframe of treated data\n std_untreated:\n The dataframe of untreated data\n\n \"\"\"\n logger.debug(\"Predicting target by Linear Regression\")\n\n start_time = dt.datetime.now()\n logger.debug(\"start --\")\n\n self.dict_outcome_untreated = {}\n self.dict_outcome_treated = {}\n df = self.df.drop(columns=self.info_col)\n\n for outcome in self.outcomes:\n y_untreated = df[df[self.treatment] == 0][outcome].to_numpy()\n y_treated = df[df[self.treatment] == 1][outcome].to_numpy()\n\n x_treated = std_treated.to_numpy()\n x_untreated = std_untreated.to_numpy()\n y_match_treated = np.array([y_untreated[idx].mean() for idx in self.treated_index])\n y_match_untreated = np.array([y_treated[idx].mean() for idx in self.untreated_index])\n x_match_treated = np.array([x_untreated[idx].mean(0) for idx in self.treated_index])\n x_match_untreated = np.array([x_treated[idx].mean(0) for idx in self.untreated_index])\n bias_coefs_c = bias_coefs(self.untreated_index, y_treated, x_treated)\n bias_coefs_t = bias_coefs(self.treated_index, y_untreated, x_untreated)\n bias_c = bias(x_untreated, x_match_untreated, bias_coefs_c)\n bias_t = bias(x_treated, x_match_treated, bias_coefs_t)\n\n y_match_treated_bias = y_treated - y_match_treated + bias_t\n y_match_untreated_bias = y_match_untreated - y_untreated - bias_c\n\n self.dict_outcome_untreated[outcome] = y_untreated\n self.dict_outcome_untreated[outcome + POSTFIX] = y_match_untreated\n self.dict_outcome_untreated[outcome + POSTFIX_BIAS] = y_match_untreated_bias\n\n self.dict_outcome_treated[outcome] = y_treated\n self.dict_outcome_treated[outcome + POSTFIX] = y_match_treated\n self.dict_outcome_treated[outcome + POSTFIX_BIAS] = y_match_treated_bias\n\n end_time = dt.datetime.now()\n total = dt.datetime.strptime(str(end_time - start_time), \"%H:%M:%S.%f\").strftime(\"%H:%M:%S\")\n logger.debug(f\"end -- [work time{total}]\")\n\n def _create_outcome_matched_df(self, dict_outcome: dict, is_treated: bool) -> pd.DataFrame:\n \"\"\"Creates dataframe with outcomes values and treatment.\n\n Args:\n dict_outcome:\n A dictionary containing outcomes\n is_treated:\n A boolean value indicating whether the outcome is treated or not\n\n Returns:\n A dataframe with matched outcome and treatment columns\n\n \"\"\"\n df_pred = pd.DataFrame(dict_outcome)\n df_pred[self.treatment] = int(is_treated)\n df_pred[self.treatment + 
POSTFIX] = int(not is_treated)\n\n return df_pred\n\n def _create_features_matched_df(self, index: np.ndarray, is_treated: bool) -> pd.DataFrame:\n \"\"\"Creates matched dataframe with features.\n\n Args:\n index:\n An array of indices\n is_treated:\n A boolean value indicating whether the outcome is treated or not\n\n\n Returns:\n A dataframe of matched features\n\n \"\"\"\n df = self.df.drop(columns=self.outcomes + self.info_col)\n\n if self.group_col is None:\n untreated_index = df[df[self.treatment] == int(not is_treated)].index.to_numpy()\n converted_index = [untreated_index[i] for i in index]\n filtered = df.loc[df[self.treatment] == int(not is_treated)].values\n untreated_df = pd.DataFrame(\n data=np.array([filtered[idx].mean(axis=0) for idx in index]), columns=df.columns\n ) # добавить дату в данные и пофиксить баги с этим (тут ломалось)\n if self.info_col is not None and len(self.info_col) != 1:\n untreated_df[\"index\"] = pd.Series(converted_index)\n treated_df = df[df[self.treatment] == int(is_treated)].reset_index()\n else:\n ids = self.df[df[self.treatment] == int(not is_treated)][self.info_col].values.ravel()\n converted_index = [ids[i] for i in index]\n untreated_df[\"index\"] = pd.Series(converted_index)\n treated_df = df[df[self.treatment] == int(is_treated)].reset_index()\n treated_df[\"index\"] = self.df[self.df[self.treatment] == int(is_treated)][self.info_col].values.ravel()\n else:\n df = df.sort_values([self.treatment, self.group_col])\n untreated_index = df[df[self.treatment] == int(not is_treated)].index.to_numpy()\n converted_index = [untreated_index[i] for i in index]\n filtered = df.loc[df[self.treatment] == int(not is_treated)]\n cols_untreated = [col for col in filtered.columns if col != self.group_col]\n filtered = filtered.drop(columns=self.group_col).to_numpy()\n untreated_df = pd.DataFrame(\n data=np.array([filtered[idx].mean(axis=0) for idx in index]), columns=cols_untreated\n )\n treated_df = df[df[self.treatment] == int(is_treated)].reset_index()\n grp = treated_df[self.group_col]\n untreated_df[self.group_col] = grp\n if self.info_col is not None and len(self.info_col) != 1:\n untreated_df[\"index\"] = pd.Series(converted_index)\n else:\n ids = (\n self.df[df[self.treatment] == int(not is_treated)]\n .sort_values([self.treatment, self.group_col])[self.info_col]\n .values.ravel()\n )\n converted_index = [ids[i] for i in index]\n untreated_df[\"index\"] = pd.Series(converted_index)\n treated_df[\"index\"] = self.df[self.df[self.treatment] == int(is_treated)][self.info_col].values.ravel()\n untreated_df.columns = [col + POSTFIX for col in untreated_df.columns]\n\n x = pd.concat([treated_df, untreated_df], axis=1).drop(\n columns=[self.treatment, self.treatment + POSTFIX], axis=1\n )\n return x\n\n def _create_matched_df(self) -> pd.DataFrame:\n \"\"\"Creates matched df of features and outcome.\n\n Returns:\n Matched dataframe\n \"\"\"\n df_pred_treated = self._create_outcome_matched_df(self.dict_outcome_treated, True)\n df_pred_untreated = self._create_outcome_matched_df(self.dict_outcome_untreated, False)\n\n df_matched = pd.concat([df_pred_treated, df_pred_untreated])\n\n treated_x = self._create_features_matched_df(self.treated_index, True)\n untreated_x = self._create_features_matched_df(self.untreated_index, False)\n\n untreated_x = pd.concat([treated_x, untreated_x])\n\n columns = list(untreated_x.columns) + list(df_matched.columns)\n\n df_matched = pd.concat([untreated_x, df_matched], axis=1, ignore_index=True)\n df_matched.columns = columns\n\n 
return df_matched\n\n def calc_atc(self, df: pd.DataFrame, outcome: str) -> tuple:\n \"\"\"Calculates Average Treatment Effect for the control group (ATC).\n\n Effect on control group if it was affected\n\n Args:\n df:\n Input dataframe\n outcome:\n The outcome to be considered for treatment effect\n\n Returns:\n Contains ATC, scaled counts, and variances as numpy arrays\n\n \"\"\"\n logger.debug(\"Calculating ATC\")\n\n df = df[df[self.treatment] == 0]\n N_c = len(df)\n ITT_c = df[outcome + POSTFIX_BIAS]\n scaled_counts_c = scaled_counts(N_c, self.treated_index, self.silent)\n\n vars_c = np.repeat(ITT_c.var(), N_c) # conservative\n atc = ITT_c.mean()\n\n return atc, scaled_counts_c, vars_c\n\n def calc_att(self, df: pd.DataFrame, outcome: str) -> tuple:\n \"\"\"Calculates Average Treatment Effect for the treated (ATT).\n\n Args:\n df:\n Input dataframe\n outcome:\n The outcome to be considered for treatment effect\n\n Returns:\n Contains ATT, scaled counts, and variances as numpy arrays\n\n \"\"\"\n logger.debug(\"Calculating ATT\")\n\n df = df[df[self.treatment] == 1]\n N_t = len(df)\n ITT_t = df[outcome + POSTFIX_BIAS]\n scaled_counts_t = scaled_counts(N_t, self.untreated_index, self.silent)\n\n vars_t = np.repeat(ITT_t.var(), N_t) # conservative\n att = ITT_t.mean()\n\n return att, scaled_counts_t, vars_t\n\n def _calculate_ate_all_target(self, df: pd.DataFrame):\n \"\"\"Creates dictionaries of all effect: ATE, ATC, ATT.\n\n Args:\n df:\n Input dataframe\n\n \"\"\"\n logger.debug(\"Creating dicts of all effects: ATE, ATC, ATT\")\n\n att_dict = {}\n atc_dict = {}\n ate_dict = {}\n N = len(df)\n N_t = df[self.treatment].sum()\n N_c = N - N_t\n\n for outcome in self.outcomes:\n att, scaled_counts_t, vars_t = self.calc_att(df, outcome)\n atc, scaled_counts_c, vars_c = self.calc_atc(df, outcome)\n ate = (N_c / N) * atc + (N_t / N) * att\n\n att_se = calc_att_se(vars_c, vars_t, scaled_counts_c)\n atc_se = calc_atc_se(vars_c, vars_t, scaled_counts_t)\n ate_se = calc_ate_se(vars_c, vars_t, scaled_counts_c, scaled_counts_t)\n\n ate_dict[outcome] = [\n ate,\n ate_se,\n pval_calc(ate / ate_se),\n ate - self.sigma * ate_se,\n ate + self.sigma * ate_se,\n ]\n atc_dict[outcome] = [\n atc,\n atc_se,\n pval_calc(atc / atc_se),\n atc - self.sigma * atc_se,\n atc + self.sigma * atc_se,\n ]\n att_dict[outcome] = [\n att,\n att_se,\n pval_calc(att / att_se),\n att - self.sigma * att_se,\n att + self.sigma * att_se,\n ]\n\n self.ATE, self.ATC, self.ATT = ate_dict, atc_dict, att_dict\n self.val_dict = ate_dict\n\n def matching_quality(self, df_matched) -> Dict[str, Union[Dict[str, float], float]]:\n \"\"\"Estimated the quality of covariates balance and repeat fraction.\n\n Calculates population stability index,Standardized mean difference\n and Kolmogorov-Smirnov test for numeric values. 
Returns a dictionary of reports.\n\n Args:\n df_matched:\n Matched DataFrame to calculate quality\n\n Returns:\n dictionary containing PSI, KS-test, SMD data and repeat fractions\n\n \"\"\"\n if self.silent:\n logger.debug(\"Estimating quality of matching\")\n else:\n logger.info(\"Estimating quality of matching\")\n\n psi_columns = set(self.columns_match)\n psi_columns = list(psi_columns - set([self.treatment] + self.outcomes))\n psi_data, ks_data, smd_data = matching_quality(\n df_matched, self.treatment, sorted(self.features_quality), sorted(psi_columns), self.silent\n )\n\n rep_dict = {\n \"match_control_to_treat\": check_repeats(np.concatenate(self.treated_index), silent=self.silent),\n \"match_treat_to_control\": check_repeats(np.concatenate(self.untreated_index), silent=self.silent),\n }\n\n self.quality_dict = {\"psi\": psi_data, \"ks_test\": ks_data, \"smd\": smd_data, \"repeats\": rep_dict}\n\n rep_df = pd.DataFrame.from_dict(rep_dict, orient=\"index\").rename(columns={0: \"value\"})\n self.rep_dict = rep_df\n\n if self.silent:\n logger.debug(f\"PSI info: \\n {psi_data.head(10)} \\nshape:{psi_data.shape}\")\n logger.debug(f\"Kolmogorov-Smirnov test info: \\n {ks_data.head(10)} \\nshape:{ks_data.shape}\")\n logger.debug(f\"Standardised mean difference info: \\n {smd_data.head(10)} \\nshape:{smd_data.shape}\")\n logger.debug(f\"Repeats info: \\n {rep_df.head(10)}\")\n else:\n logger.info(f\"PSI info: \\n {psi_data.head(10)} \\nshape:{psi_data.shape}\")\n logger.info(f\"Kolmogorov-Smirnov test info: \\n {ks_data.head(10)} \\nshape:{ks_data.shape}\")\n logger.info(f\"Standardised mean difference info: \\n {smd_data.head(10)} \\nshape:{smd_data.shape}\")\n logger.info(f\"Repeats info: \\n {rep_df.head(10)}\")\n\n return self.quality_dict\n\n def group_match(self):\n \"\"\"Matches the dataframe if it divided by groups.\n\n Returns:\n A tuple containing the matched dataframe and metrics such as ATE, ATT and ATC\n\n \"\"\"\n df = self.df.drop(columns=self.info_col)\n groups = sorted(df[self.group_col].unique())\n matches_c = []\n matches_t = []\n group_arr_c = df[df[self.treatment] == 0][self.group_col].to_numpy()\n group_arr_t = df[df[self.treatment] == 1][self.group_col].to_numpy()\n treat_arr_c = df[df[self.treatment] == 0][self.treatment].to_numpy()\n treat_arr_t = df[df[self.treatment] == 1][self.treatment].to_numpy()\n\n if self.pbar:\n self.tqdm = tqdm(total=len(groups) * 2)\n\n for group in groups:\n df_group = df[df[self.group_col] == group]\n temp = df_group[self.columns_match + [self.group_col]]\n temp = temp.loc[:, (temp != 0).any(axis=0)].drop(columns=self.group_col)\n treated, untreated = self._get_split(temp)\n\n std_treated_np, std_untreated_np = _transform_to_np(treated, untreated, self.weights)\n\n if self.pbar:\n self.tqdm.set_description(desc=f\"Get untreated index by group {group}\")\n matches_u_i = _get_index(std_treated_np, std_untreated_np, self.n_neighbors)\n\n if self.pbar:\n self.tqdm.update(1)\n self.tqdm.set_description(desc=f\"Get treated index by group {group}\")\n matches_t_i = _get_index(std_untreated_np, std_treated_np, self.n_neighbors)\n if self.pbar:\n self.tqdm.update(1)\n self.tqdm.refresh()\n\n group_mask_c = group_arr_c == group\n group_mask_t = group_arr_t == group\n matches_c_mask = np.arange(treat_arr_t.shape[0])[group_mask_t]\n matches_u_i = [matches_c_mask[i] for i in matches_u_i]\n matches_t_mask = np.arange(treat_arr_c.shape[0])[group_mask_c]\n matches_t_i = [matches_t_mask[i] for i in matches_t_i]\n matches_c.extend(matches_u_i)\n 
matches_t.extend(matches_t_i)\n\n if self.pbar:\n self.tqdm.close()\n\n self.untreated_index = matches_c\n self.treated_index = matches_t\n\n df_group = df[self.columns_match].drop(columns=self.group_col)\n treated, untreated = self._get_split(df_group)\n self._predict_outcome(treated, untreated)\n df_matched = self._create_matched_df()\n self._calculate_ate_all_target(df_matched)\n\n if self.validation:\n return self.val_dict\n\n return self.report_view(), df_matched\n\n def match(self):\n \"\"\"Matches the dataframe.\n\n Returns:\n A tuple containing the matched dataframe and metrics such as ATE, ATT and ATC\n\n \"\"\"\n if self.group_col is not None:\n return self.group_match()\n\n df = self.df[self.columns_match]\n treated, untreated = self._get_split(df)\n\n std_treated_np, std_untreated_np = _transform_to_np(treated, untreated, self.weights)\n\n if self.pbar:\n self.tqdm = tqdm(total=len(std_treated_np) + len(std_untreated_np))\n self.tqdm.set_description(desc=\"Get untreated index\")\n\n untreated_index = _get_index(std_treated_np, std_untreated_np, self.n_neighbors)\n\n if self.pbar:\n self.tqdm.update(len(std_treated_np))\n self.tqdm.set_description(desc=\"Get treated index\")\n treated_index = _get_index(std_untreated_np, std_treated_np, self.n_neighbors)\n\n if self.pbar:\n self.tqdm.update(len(std_untreated_np))\n self.tqdm.refresh()\n self.tqdm.close()\n\n self.untreated_index = untreated_index\n self.treated_index = treated_index\n\n self._predict_outcome(treated, untreated)\n\n df_matched = self._create_matched_df()\n self._calculate_ate_all_target(df_matched)\n\n if self.validation:\n return self.val_dict\n\n return self.report_view(), df_matched\n\n def report_view(self) -> pd.DataFrame:\n \"\"\"Formats the ATE, ATC, and ATT results into a Pandas DataFrame for easy viewing.\n\n Returns:\n DataFrame containing ATE, ATC, and ATT results\n \"\"\"\n result = (self.ATE, self.ATC, self.ATT)\n\n for outcome in self.outcomes:\n res = pd.DataFrame(\n [x[outcome] + [outcome] for x in result],\n columns=[\"effect_size\", \"std_err\", \"p-val\", \"ci_lower\", \"ci_upper\", \"outcome\"],\n index=[\"ATE\", \"ATC\", \"ATT\"],\n )\n self.results = pd.concat([self.results, res])\n return self.results" }, { "identifier": "MatcherNoReplacement", "path": "hypex/algorithms/no_replacement_matching.py", "snippet": "class MatcherNoReplacement:\n \"\"\"Matching groups with no replacement.\n\n Realized by optimizing the linear sum of distances between pairs of treatment and\n control samples.\n \"\"\"\n\n def __init__(self, X: pd.DataFrame, a: pd.Series, weights: dict = None, approximate_match: bool = False):\n \"\"\"Initialize matching.\n\n Args:\n X: features dataframe\n a: series of treatment value\n weights: weights for numeric columns in order to increase matching quality.\n approximate_match: use or not approximate matching\n \"\"\"\n self.treatment = a\n self.X = X\n self.weights = weights\n self.approximate_match = approximate_match\n\n def match(self):\n \"\"\"Function run matching with no replacement.\n\n Returns:\n Dataframe of matched indexes.\n \"\"\"\n matches = {}\n cov = conditional_covariance(self.X[self.treatment == 1].values, self.X[self.treatment == 0].values)\n distance_matrix = self._get_distance_matrix(self.X[self.treatment == 1], self.X[self.treatment == 0], cov)\n source_array, neighbor_array_indices, distances = optimally_match_distance_matrix(distance_matrix)\n source_df = self.X[self.treatment == 1].iloc[np.array(source_array)]\n target_df = self.X[self.treatment == 
0].iloc[np.array(neighbor_array_indices)]\n\n matches[1] = self.create_match_df(self.treatment, source_df, target_df, distances)\n matches[0] = self.create_match_df(self.treatment, target_df, source_df, distances)\n\n match_df = pd.concat(matches, sort=True)\n return match_df\n\n def create_match_df(\n self, base_series: pd.Series, source_df: pd.DataFrame, target_df: pd.DataFrame, distances: list\n ) -> pd.DataFrame:\n \"\"\"Function creates matching dataframe.\n\n Args:\n base_series: series of treatment value.\n source_df: dataframe of sources indexes.\n target_df: dataframe of target indexes.\n distances: matrix of calculated distances.\n\n Returns:\n Matched dataframe of indexes.\n \"\"\"\n match_sub_df = pd.DataFrame(\n index=base_series.index,\n columns=[\n \"matches\",\n \"distances\",\n ],\n data=base_series.apply(lambda x: pd.Series([[], []])).values,\n dtype=\"object\",\n )\n\n # matching from source to target: read distances\n match_sub_df.loc[source_df.index] = pd.DataFrame(\n data=dict(\n matches=[[tidx] for tidx in target_df.index],\n distances=distances,\n ),\n index=source_df.index,\n )\n\n # matching from target to target: fill with zeros\n match_sub_df.loc[target_df.index] = pd.DataFrame(\n data=dict(\n matches=[[tidx] for tidx in target_df.index],\n distances=[[0]] * len(distances),\n ),\n index=target_df.index,\n )\n return match_sub_df\n\n def _get_metric_dict(self, cov: np.ndarray) -> dict:\n \"\"\"Function calculates correct feature space and generate metrics dist for cdist calculation.\n\n Args:\n cov: Matrix of covariations.\n\n Returns:\n Metric dictionary\n \"\"\"\n metric_dict = dict(metric=\"mahalanobis\")\n mahalanobis_transform = np.linalg.inv(cov)\n if self.weights is not None:\n features = self.X.columns\n w_list = np.array([self.weights[col] if col in self.weights.keys() else 1 for col in features])\n w_matrix = np.sqrt(np.diag(w_list / w_list.sum()))\n mahalanobis_transform = np.dot(w_matrix, mahalanobis_transform)\n\n metric_dict[\"VI\"] = mahalanobis_transform\n return metric_dict\n\n def _get_distance_matrix(self, source_df: pd.DataFrame, target_df: pd.DataFrame, cov: np.ndarray) -> np.ndarray:\n \"\"\"Create distance matrix for no replacement match.\n\n Combines metric and source/target data into a\n precalculated distance matrix which can be passed to\n scipy.optimize.linear_sum_assignment.\n\n Args:\n source_df: source feature dataframe.\n target_df: target feature dataframe.\n cov: matrix of covariations.\n\n Returns:\n Matrix of distances.\n \"\"\"\n cdist_args = dict(XA=_ensure_array_columnlike(source_df.values), XB=_ensure_array_columnlike(target_df.values))\n cdist_args.update(self._get_metric_dict(cov))\n\n if self.approximate_match:\n if len(cdist_args['XB']) < len(cdist_args['XA']):\n covariance_matrix = np.cov(cdist_args['XB'].T)\n else:\n covariance_matrix = np.cov(cdist_args['XA'].T)\n covariance_matrix_reg = covariance_matrix + np.eye(covariance_matrix.shape[0]) * 1e-8\n\n distance_matrix = np.zeros((cdist_args['XA'].shape[0], cdist_args['XB'].shape[0]))\n for i, x in enumerate(cdist_args['XA']):\n distance_matrix[i] = _m_distance(cdist_args['XB'], x, np.linalg.inv(covariance_matrix_reg))\n else:\n distance_matrix = distance.cdist(**cdist_args)\n return distance_matrix" }, { "identifier": "FeatureSelector", "path": "hypex/selectors/feature_selector.py", "snippet": "class FeatureSelector:\n \"\"\"Class of LAMA Feature selector. Select top features. 
By default, use LGM.\n # TODO: write some feature selector\"\"\"\n\n def __init__(\n self,\n outcome: str,\n outcome_type: str,\n treatment: str,\n timeout: int,\n n_threads: int,\n n_folds: int,\n verbose: bool, # не используется\n generate_report: bool,\n report_dir: str,\n use_algos: List[str],\n ):\n \"\"\"Initialize the LamaFeatureSelector.\n\n Args:\n outcome:\n The target column\n outcome_type:\n The type of target column\n treatment:\n The column that determines control and test groups\n timeout:\n Time limit for the execution of the code\n n_threads:\n Maximum number of threads to be used\n n_folds:\n Number of folds for cross-validation\n verbose:\n Flag to control the verbosity of the process stages\n generate_report:\n Flag to control whether to create a report or not\n report_dir:\n Directory for storing report files\n use_algos:\n List of names of LAMA algorithms for feature selection\n \"\"\"\n self.outcome = outcome\n self.outcome_type = outcome_type\n self.treatment = treatment\n self.use_algos = use_algos\n self.timeout = timeout\n self.n_threads = n_threads\n self.n_folds = n_folds\n self.verbose = verbose\n self.generate_report = generate_report\n self.report_dir = report_dir\n\n def perform_selection(self, df: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Trains a model and returns feature scores.\n\n This method defines metrics, applies the model, creates a report, and returns feature scores\n\n Args:\n df:\n Input data\n\n Returns:\n A DataFrame containing the feature scores from the model\n\n \"\"\"\n roles = {\n \"target\": self.outcome,\n \"drop\": [self.treatment],\n }\n\n if self.outcome_type == \"numeric\":\n task_name = \"reg\"\n loss = \"mse\"\n metric = \"mse\"\n elif self.outcome_type == \"binary\":\n task_name = \"binary\"\n loss = \"logloss\"\n metric = \"logloss\"\n else:\n task_name = \"multiclass\"\n loss = \"crossentropy\"\n metric = \"crossentropy\"\n\n features_scores = []\n\n return features_scores" }, { "identifier": "SpearmanFilter", "path": "hypex/selectors/spearman_filter.py", "snippet": "class SpearmanFilter:\n \"\"\"Class to filter columns based on the Spearman correlation coefficient.\n\n The class is utilized to filter dataframe columns that do not exhibit a significant\n correlation (based on a provided threshold) with a specified outcome column.\n The significance of the correlation is determined using the Spearman correlation coefficient\n and a p-value threshold of 0.05\n \"\"\"\n\n def __init__(self, outcome: str, treatment: str, threshold: float):\n \"\"\"Initialize spearman filter.\n\n Args:\n outcome:\n The name of target column\n treatment:\n The name of the column that determines control and test groups\n threshold:\n The threshold for the Spearman correlation coefficient filter\n \"\"\"\n self.outcome: str = outcome\n self.treatment: str = treatment\n self.threshold: float = threshold\n\n def perform_filter(self, df: pd.DataFrame) -> pd.DataFrame:\n \"\"\"Filters columns based on their correlation with the outcome column.\n\n The method tests the correlation using the Spearman correlation coefficient.\n Columns that have an absolute correlation coefficient value less than the provided threshold,\n and a p-value less than 0.05, are considered insignificant and are removed from the dataframe\n\n Args:\n df:\n The input DataFrame\n\n Returns:\n The filtered DataFrame, containing only columns that\n are significantly correlated with the outcome column\n \"\"\"\n selected = []\n columns = df.drop([self.treatment, self.outcome], 1).columns\n 
for column in columns:\n result = spearmanr(df[self.outcome].values, df[column].values)\n if (abs(result[0] < self.threshold)) and (result[1] < PVALUE):\n selected.append(column)\n\n logger.info(f\"Drop columns {list(set(columns) - set(selected))}\")\n\n columns = selected + [self.treatment, self.outcome]\n df = df[columns]\n\n return df" }, { "identifier": "OutliersFilter", "path": "hypex/selectors/outliers_filter.py", "snippet": "class OutliersFilter:\n \"\"\"Class of Outliers Filter. It creates a row indices that should be deleted by percentile.\"\"\"\n\n def __init__(self, interquartile_coeff, mode_percentile, min_percentile, max_percentile):\n \"\"\"Initializes the OutliersFilter.\n\n Args:\n interquartile_coeff:\n Coefficient for the interquartile range to determine outliers\n mode_percentile:\n If True, outliers are determined by custom percentiles\n min_percentile:\n The lower percentile. Values below this percentile are considered outliers.\n max_percentile:\n The upper percentile. Values above this percentile are considered outliers\n \"\"\"\n self.interquartile_coeff = interquartile_coeff\n self.mode_percentile = mode_percentile\n self.min_percentile = min_percentile\n self.max_percentile = max_percentile\n\n def perform_filter(self, df: pd.DataFrame, interquartile: bool = True) -> pd.DataFrame:\n \"\"\"Identifies rows with outliers.\n\n This method creates a set of row indices to be removed, which contains values less than\n `min_percentile` and larger than `max_percentile` (if `mode_percentile` is True), or values\n smaller than the 0.2 and larget than 0.8 (if `mode_percentile` is False)\n\n Args:\n df:\n The input DataFrame\n interquartile:\n If True, uses the interquartile range to determine outliers. Defaults to True\n\n Returns:\n The set of row indices with outliers\n \"\"\"\n columns_names = df.select_dtypes(include=\"number\").columns\n rows_for_del = []\n for column in columns_names:\n if self.mode_percentile:\n min_value = df[column].quantile(self.min_percentile)\n max_value = df[column].quantile(self.max_percentile)\n elif interquartile:\n upper_quantile = df[column].quantile(0.8)\n lower_quantile = df[column].quantile(0.2)\n\n interquartile_range = upper_quantile - lower_quantile\n min_value = lower_quantile - self.interquartile_coeff * interquartile_range\n max_value = upper_quantile + self.interquartile_coeff * interquartile_range\n else:\n mean_value = df[column].mean()\n standard_deviation = df[column].std()\n nstd_lower, nstd_upper = 3, 3\n\n min_value = mean_value - nstd_lower * standard_deviation\n max_value = mean_value + nstd_upper * standard_deviation\n\n rows_for_del_column = (df[column] < min_value) | (df[column] > max_value)\n rows_for_del_column = df.index[rows_for_del_column].tolist()\n rows_for_del.extend(rows_for_del_column)\n rows_for_del = list(set(rows_for_del))\n logger.info(f\"Drop {len(rows_for_del)} rows\")\n return df.drop(rows_for_del)" }, { "identifier": "const_filtration", "path": "hypex/selectors/base_filtration.py", "snippet": "def const_filtration(X: pd.DataFrame, threshold: float = 0.95) -> list:\n \"\"\"Function removes features consist of constant value on 95%.\n\n Args:\n X: related dataset\n threshold: constant fill rate, default is 0.95\n\n Returns:\n List of filtered columns\n \"\"\"\n is_const = pd.Series(0, index=X.columns, dtype=np.dtype(bool))\n for col in X.columns:\n # NaNs are not counted using unique (since np.nan != np.nan). 
Fill them with a unique value:\n cur_col = X.loc[:, col]\n cur_col.loc[~np.isfinite(cur_col)] = cur_col.max() + 1\n # Get values' frequency:\n freqs = cur_col.value_counts(normalize=True)\n is_const[col] = np.any(freqs > threshold)\n\n selected_features = ~is_const\n if np.sum(selected_features) == 0:\n raise AssertionError(\"All features were removed by constant filtration.\")\n else:\n return X.loc[:, selected_features].columns.to_list()" }, { "identifier": "nan_filtration", "path": "hypex/selectors/base_filtration.py", "snippet": "def nan_filtration(X: pd.DataFrame, threshold: float = 0.8):\n \"\"\"Function removes features consist of NaN value on 80%.\n\n Args:\n X: related dataset\n threshold: constant fill rate, default is 0.95\n\n Returns:\n List of filtered columns\n \"\"\"\n nan_freqs = np.mean(pd.isnull(X), axis=0)\n is_sparse = nan_freqs > threshold\n selected_features = ~is_sparse\n if np.sum(selected_features) == 0:\n raise AssertionError(\"All features were removed by nan filtration.\")\n else:\n return X.loc[:, selected_features].columns.to_list()" }, { "identifier": "random_feature", "path": "hypex/utils/validators.py", "snippet": "def random_feature(df: pd.DataFrame):\n \"\"\"Adds a random feature to the initial dataset.\n\n Args:\n df:\n The initial dataframe\n\n Returns:\n The modified dataframe with an additional random feature\n A validation flag\n \"\"\"\n feature = np.random.normal(0, 1, size=len(df))\n validate = 1\n df[\"random_feature\"] = feature\n return df, validate" }, { "identifier": "random_treatment", "path": "hypex/utils/validators.py", "snippet": "def random_treatment(df: pd.DataFrame, treatment: str):\n \"\"\"Replaces real treatment with a random placebo treatment.\n\n Args:\n df:\n The initial dataframe\n treatment:\n The columns name representing the treatment\n\n Returns:\n The modified dataframe with the original treatment replaced\n The original treatment series\n A validation flag\n \"\"\"\n prop1 = df[treatment].sum() / df.shape[0]\n prop0 = 1 - prop1\n new_treatment = np.random.choice([0, 1], size=df.shape[0], p=[prop0, prop1])\n validate = 1\n orig_treatment = df[treatment]\n df = df.drop(columns=treatment)\n df[treatment] = new_treatment\n return df, orig_treatment, validate" }, { "identifier": "subset_refuter", "path": "hypex/utils/validators.py", "snippet": "def subset_refuter(df: pd.DataFrame, treatment: str, fraction: float = 0.8):\n \"\"\"Returns a subset of data with given fraction (default 0.8).\n\n Args:\n df:\n The initial dataframe\n treatment:\n The column name representing the treatment\n fraction:\n The fraction of the dataset to divide random matching\n\n Returns:\n The subset of the dataframe\n A validation flag\n \"\"\"\n df = df.groupby(treatment, group_keys=False).apply(lambda x: x.sample(frac=fraction))\n validate = 1\n return df, validate" }, { "identifier": "test_significance", "path": "hypex/utils/validators.py", "snippet": "def test_significance(estimate: float, simulations: List) -> float:\n \"\"\"Performs a significance test for a normal distribution.\n\n Args:\n estimate:\n The estimated effect\n simulations:\n A list of estimated effects from each simulation\n\n Returns:\n The p-value of the test\n \"\"\"\n mean_refute_value = np.mean(simulations)\n std_dev_refute_values = np.std(simulations)\n z_score = (estimate - mean_refute_value) / std_dev_refute_values\n\n if z_score > 0: # Right Tail\n p_value = 1 - st.norm.cdf(z_score)\n else: # Left Tail\n p_value = st.norm.cdf(z_score)\n\n return p_value" } ]
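The refuters listed in the context above (random_feature, random_treatment, subset_refuter) are plain functions over a dataframe, so they can be exercised directly. A small sketch, assuming the hypex package from this record is importable and using a purely synthetic dataframe with hypothetical column names:

import numpy as np
import pandas as pd

from hypex.utils.validators import random_feature, random_treatment, subset_refuter

rng = np.random.default_rng(42)
df = pd.DataFrame({"treatment": rng.integers(0, 2, size=100), "x": rng.normal(size=100)})

df_extra, _ = random_feature(df.copy())                               # adds a N(0, 1) "random_feature" column
df_placebo, orig_t, _ = random_treatment(df.copy(), "treatment")      # replaces treatment with a random placebo
df_subset, _ = subset_refuter(df.copy(), "treatment", fraction=0.8)   # stratified 80% subsample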
import logging
import pickle

import numpy as np
import pandas as pd
from typing import Union
from tqdm.auto import tqdm

from .algorithms.faiss_matcher import FaissMatcher
from .algorithms.no_replacement_matching import MatcherNoReplacement
from .selectors.feature_selector import FeatureSelector
from .selectors.spearman_filter import SpearmanFilter
from .selectors.outliers_filter import OutliersFilter
from .selectors.base_filtration import const_filtration, nan_filtration
from .utils.validators import random_feature
from .utils.validators import random_treatment
from .utils.validators import subset_refuter
from .utils.validators import test_significance
12,777
def _log(self, message, silent=None): """Logs a message at the appropriate level. Args: message: The message to log. silent: If silent, logs will be only info """ if silent is None: silent = self.silent if silent: logger.debug(message) else: logger.info(message) def _matching(self) -> tuple: """Performs matching considering the presence of groups. Returns: Results of matching and matching quality metrics """ self._create_faiss_matcher() self._log("Applying matching") self.results, df_matched = self.matcher.match() self.quality_result = self.matcher.matching_quality(df_matched) return self.results, self.quality_result, df_matched def validate_result( self, refuter: str = "random_feature", effect_type: str = "ate", n_sim: int = 10, fraction: float = 0.8 ) -> dict: """Validates estimated ATE (Average Treatment Effect). Validates estimated effect: 1) by replacing real treatment with random placebo treatment. Estimated effect must be droped to zero, p-val > 0.05; 2) by adding random feature (`random_feature`). Estimated effect shouldn't change significantly, p-val < 0.05; 3) estimates effect on subset of data (default fraction is 0.8). Estimated effect shouldn't change significantly, p-val < 0.05. Args: refuter: Refuter type (`random_treatment`, `random_feature`, `subset_refuter`) effect_type: Which effect to validate (`ate`, `att`, `atc`) n_sim: Number of simulations fraction: Subset fraction for subset refuter only Returns: Dictionary of outcome_name (mean_effect on validation, p-value) """ if self.silent: logger.debug("Applying validation of result") else: logger.info("Applying validation of result") self.val_dict = {k: [] for k in self.outcomes} self.pval_dict = dict() effect_dict = {"ate": 0, "atc": 1, "att": 2} assert effect_type in effect_dict.keys() for i in tqdm(range(n_sim)): if refuter in ["random_treatment", "random_feature"]: if refuter == "random_treatment": self.input_data, orig_treatment, self.validate = random_treatment(self.input_data, self.treatment) elif refuter == "random_feature": self.input_data, self.validate = random_feature(self.input_data) if self.features_importance is not None and i == 0: self.features_importance.append("random_feature") self.matcher = FaissMatcher( self.input_data, self.outcomes, self.treatment, info_col=self.info_col, features=self.features_importance, group_col=self.group_col, validation=self.validate, n_neighbors=self.n_neighbors, pbar=False, ) elif refuter == "subset_refuter": df, self.validate = subset_refuter(self.input_data, self.treatment, fraction) self.matcher = FaissMatcher( df, self.outcomes, self.treatment, info_col=self.info_col, features=self.features_importance, group_col=self.group_col, validation=self.validate, n_neighbors=self.n_neighbors, pbar=False, ) else: logger.error("Incorrect refuter name") raise NameError( "Incorrect refuter name! Available refuters: `random_feature`, `random_treatment`, `subset_refuter`" ) if self.group_col is None: sim = self.matcher.match() else: sim = self.matcher.group_match() for key in self.val_dict.keys(): self.val_dict[key].append(sim[key][0]) for outcome in self.outcomes: self.pval_dict.update({outcome: [np.mean(self.val_dict[outcome])]}) self.pval_dict[outcome].append(
"""Base Matcher class.""" REPORT_FEAT_SELECT_DIR = "report_feature_selector" REPORT_PROP_MATCHER_DIR = "report_matcher" NAME_REPORT = "lama_interactive_report.html" N_THREADS = 1 N_FOLDS = 4 RANDOM_STATE = 123 TEST_SIZE = 0.2 TIMEOUT = 600 VERBOSE = 2 USE_ALGOS = ["lgb"] PROP_SCORES_COLUMN = "prop_scores" GENERATE_REPORT = True SAME_TARGET_THRESHOLD = 0.7 OUT_INTER_COEFF = 1.5 OUT_MODE_PERCENT = True OUT_MIN_PERCENT = 0.02 OUT_MAX_PERCENT = 0.98 logger = logging.getLogger("hypex") console_out = logging.StreamHandler() logging.basicConfig( handlers=(console_out,), format="[%(asctime)s | %(name)s | %(levelname)s]: %(message)s", datefmt="%d.%m.%Y %H:%M:%S", level=logging.INFO, ) class Matcher: """Class for compile full pipeline of Matching in Causal Inference task. Matcher steps: - Read, analyze data - Feature selection via LightAutoML - Converting a dataset with features to another space via Cholesky decomposition In the new space, the distance L2 becomes equivalent to the Mahalanobis distance. This allows us to use faiss to search for nearest objects, which can search only by L2 metric, but without violating the methodology of matching, for which it is important to count by the Mahalanobis distance - Finding the nearest neighbors for each unit (with duplicates) using faiss. For each of the control group, neighbors from the target group are matched and vice versa. - Calculation bias - Creating matched df (Wide df with pairs) - Calculation metrics: ATE, ATT, ATC, p-value, and сonfidence intervals - Calculation quality: PS-test, KS test, SMD test - Returns metrics as dataframe, quality results as dict of df's and df_matched - After receiving the result, the result should be validated using :func:`~hypex.matcher.Matcher.validate_result` Example: Common usecase - base pipeline for matching >>> # Base info >>> treatment = "treatment" # Column name with info about 'treatment' 0 or 1 >>> target = "target" # Column name with target >>> >>> # Optional >>> info_col = ["user_id", 'address'] # Columns that will not participate in the match and are informative. >>> group_col = "CatCol" # Column name for strict comparison (for a categorical feature) >>> >>> # Matching >>> model = Matcher(data, outcome=target, treatment=treatment, info_col=info_col, group_col=group_col) >>> features = model.lama_feature_select() # Feature selection via lama >>> results, quality, df_matched = model.estimate(features=some_features) # Performs matching >>> >>> model.validate_result() """ def __init__( self, input_data: pd.DataFrame, treatment: str, outcome: Union[str, list] = None, outcome_type: str = "numeric", group_col: str = None, info_col: list = None, weights: dict = None, base_filtration: bool = False, generate_report: bool = GENERATE_REPORT, report_feat_select_dir: str = REPORT_FEAT_SELECT_DIR, timeout: int = TIMEOUT, n_threads: int = N_THREADS, n_folds: int = N_FOLDS, verbose: bool = VERBOSE, use_algos: list = None, same_target_threshold: float = SAME_TARGET_THRESHOLD, interquartile_coeff: float = OUT_INTER_COEFF, drop_outliers_by_percentile: bool = OUT_MODE_PERCENT, min_percentile: float = OUT_MIN_PERCENT, max_percentile: float = OUT_MAX_PERCENT, n_neighbors: int = 1, silent: bool = True, pbar: bool = True, ): """Initialize the Matcher object. Args: input_data: Input dataframe outcome: Target column treatment: Column determine control and test groups outcome_type: Values type of target column. Defaults to "numeric" group_col: Column for grouping. Defaults to None. 
info_col: Columns with id, date or metadata, not taking part in calculations. Defaults to None weights: weights for numeric columns in order to increase matching quality by weighted feature. By default, is None (all features have the same weight equal to 1). Example: {'feature_1': 10} base_filtration: To use or not base filtration of features in order to remove all constant or almost all constant, bool. Default is False. generate_report: Flag to create report. Defaults to True report_feat_select_dir: Folder for report files. Defaults to "report_feature_selector" timeout: Limit work time of code LAMA. Defaults to 600 n_threads: Maximum number of threads. Defaults to 1 n_folds: Number of folds for cross-validation. Defaults to 4 verbose: Flag to show process stages. Defaults to 2 use_algos: List of names of LAMA algorithms for feature selection. Defaults to ["lgb"] same_target_threshold: Threshold for correlation coefficient filter (Spearman). Default to 0.7 interquartile_coeff: Percent for drop outliers. Default to 1.5 drop_outliers_by_percentile: Flag to drop outliers by custom percentiles. Defaults to True min_percentile: Minimum percentile to drop outliers. Defaults to 0.02 max_percentile: Maximum percentile to drop outliers. Defaults to 0.98 n_neighbors: Number of neighbors to match (in fact you may see more then n matches as every match may have more then one neighbor with the same distance). Default value is 1. silent: Write logs in debug mode pbar: Display progress bar while get index """ if use_algos is None: use_algos = USE_ALGOS self.input_data = input_data if outcome is None: outcome = list() self.outcomes = outcome if type(outcome) == list else [outcome] self.treatment = treatment self.group_col = group_col self.info_col = info_col self.outcome_type = outcome_type self.weights = weights self.generate_report = generate_report self.report_feat_select_dir = report_feat_select_dir self.timeout = timeout self.n_threads = n_threads self.n_folds = n_folds self.verbose = verbose self.use_algos = use_algos self.same_target_threshold = same_target_threshold self.interquartile_coeff = interquartile_coeff self.mode_percentile = drop_outliers_by_percentile self.min_percentile = min_percentile self.max_percentile = max_percentile self.base_filtration = base_filtration self.features_importance = None self.matcher = None self.val_dict = None self.pval_dict = None self.new_treatment = None self.validate = None self.dropped_features = [] self.n_neighbors = n_neighbors self.silent = silent self.pbar = pbar self._preprocessing_data() def _convert_categorical_to_dummy(self): """Converts categorical variables to dummy variables. Returns: Data with categorical variables converted to dummy variables. 
""" info_col = self.info_col if self.info_col is not None else [] group_col = [self.group_col] if self.group_col is not None else [] columns_to_drop = info_col + group_col if columns_to_drop is not None: data = self.input_data.drop(columns=columns_to_drop) else: data = self.input_data dummy_data = pd.get_dummies(data, drop_first=True, dtype=np.uint8) return dummy_data def _preprocessing_data(self): """Converts categorical features into dummy variables.""" info_col = self.info_col if self.info_col is not None else [] group_col = [self.group_col] if self.group_col is not None else [] columns_to_drop = info_col + group_col + self.outcomes + [self.treatment] if self.base_filtration: filtered_features = nan_filtration(self.input_data.drop(columns=columns_to_drop)) self.dropped_features = [f for f in self.input_data.columns if f not in filtered_features + columns_to_drop] self.input_data = self.input_data[filtered_features + columns_to_drop] nan_counts = self.input_data.isna().sum().sum() if nan_counts != 0: self._log(f"Number of NaN values filled with zeros: {nan_counts}", silent=False) self.input_data = self.input_data.fillna(0) if self.group_col is not None: group_col = self.input_data[[self.group_col]] if self.info_col is not None: info_col = self.input_data[self.info_col] self.input_data = self._convert_categorical_to_dummy() if self.group_col is not None: self.input_data = pd.concat([self.input_data, group_col], axis=1) if self.info_col is not None: self.input_data = pd.concat([self.input_data, info_col], axis=1) if self.base_filtration: filtered_features = const_filtration(self.input_data.drop(columns=columns_to_drop)) self.dropped_features = np.concatenate( ( self.dropped_features, [f for f in self.input_data.columns if f not in filtered_features + columns_to_drop], ) ) self.input_data = self.input_data[filtered_features + columns_to_drop] self._log("Categorical features turned into dummy") def _apply_filter(self, filter_class, *filter_args): """Applies a filter to the input data. Args: filter_class: The class of the filter to apply. *filter_args: Arguments to pass to the filter class. """ filter_instance = filter_class(*filter_args) self.input_data = filter_instance.perform_filter(self.input_data) def _spearman_filter(self): """Applies a filter by dropping columns correlated with the outcome column. This method uses the Spearman filter to eliminate features from the dataset that are highly correlated with the outcome columns, based on a pre-set threshold """ self._log("Applying filter by spearman test - drop columns correlated with outcome") self._apply_filter(SpearmanFilter, self.outcomes[0], self.treatment, self.same_target_threshold) def outliers_filter(self): """Removes outlier values from the dataset. This method employs an OutliersFilter. If `drop_outliers_by_percentile` is True, it retains only the values between the min and max percentiles If `drop_outliers_by_percentile` is False, it retains only the values between 2nd and 98th percentiles """ self._log( f"Applying filter of outliers\n" f"interquartile_coeff={self.interquartile_coeff}\n" f"mode_percentile={self.mode_percentile}\n" f"min_percentile={self.min_percentile}\n" f"max_percentile={self.max_percentile}" ) self._apply_filter( OutliersFilter, self.interquartile_coeff, self.mode_percentile, self.min_percentile, self.max_percentile ) def match_no_rep(self, threshold: float = 0.1, approximate_match: bool = False) -> pd.DataFrame: """Matching groups with no replacement. 
It's done by optimizing the linear sum of distances between pairs of treatment and control samples. Args: threshold: caliper for minimum deviation between test and control groups. in case weights is not None. approximate_match: use or not approximate matching Returns: Matched dataframe with no replacements. """ a = self.input_data[self.treatment] X = self.input_data.drop(columns=self.treatment) if self.info_col is not None: X = X.drop(columns=self.info_col) index_matched = MatcherNoReplacement(X, a, self.weights, approximate_match).match() filtred_matches = index_matched.loc[1].iloc[self.input_data[a == 1].index].matches[index_matched.loc[1].iloc[self.input_data[a == 1].index].matches.apply(lambda x: x != [])] if self.weights is not None: weighted_features = [f for f in self.weights.keys()] index_dict = dict() for w in weighted_features: source = self.input_data.loc[np.concatenate(filtred_matches.values)][w].values target = self.input_data.loc[filtred_matches.index.to_list()][w].values index = abs(source - target) <= abs(source) * threshold index_dict.update({w: index}) index_filtered = sum(index_dict.values()) == len(self.weights) matched_data = pd.concat( [self.input_data.loc[filtred_matches.index.to_list()].iloc[index_filtered], self.input_data.loc[np.concatenate(filtred_matches.values)].iloc[index_filtered]] ) else: matched_data = pd.concat([self.input_data.loc[filtred_matches.index.to_list()], self.input_data.loc[np.concatenate(filtred_matches.values)]]) return matched_data def lama_feature_select(self) -> pd.DataFrame: """Calculates the importance of each feature. This method use LamaFeatureSelector to rank the importance of each feature in the dataset The features are then sorted by their importance with the most important feature first Returns: The feature importances, sorted in descending order """ self._log("Counting feature importance") feat_select = FeatureSelector( outcome=self.outcomes[0], outcome_type=self.outcome_type, treatment=self.treatment, timeout=self.timeout, n_threads=self.n_threads, n_folds=self.n_folds, verbose=self.verbose, generate_report=self.generate_report, report_dir=self.report_feat_select_dir, use_algos=self.use_algos, ) df = self.input_data if self.group_col is None else self.input_data.drop(columns=self.group_col) if self.info_col is not None: df = df.drop(columns=self.info_col) features = feat_select.perform_selection(df=df) if self.group_col is None: self.features_importance = features else: self.features_importance = features.append( {"Feature": self.group_col, "Importance": features.Importance.max()}, ignore_index=True ) return self.features_importance.sort_values("Importance", ascending=False) def _create_faiss_matcher(self, df=None, validation=None): """Creates a FaissMatcher object. Args: df: The dataframe to use. If None, uses self.input_data. validation: Whether to use the matcher for validation. 
If None, determines based on whether """ if df is None: df = self.input_data self.matcher = FaissMatcher( df, self.outcomes, self.treatment, info_col=self.info_col, weights=self.weights, features=self.features_importance, group_col=self.group_col, validation=validation, n_neighbors=self.n_neighbors, pbar=False if validation else self.pbar, ) def _perform_validation(self): """Performs validation using the FaissMatcher.""" if self.group_col is None: sim = self.matcher.match() else: sim = self.matcher.group_match() for key in self.val_dict.keys(): self.val_dict[key].append(sim[key][0]) def _log(self, message, silent=None): """Logs a message at the appropriate level. Args: message: The message to log. silent: If silent, logs will be only info """ if silent is None: silent = self.silent if silent: logger.debug(message) else: logger.info(message) def _matching(self) -> tuple: """Performs matching considering the presence of groups. Returns: Results of matching and matching quality metrics """ self._create_faiss_matcher() self._log("Applying matching") self.results, df_matched = self.matcher.match() self.quality_result = self.matcher.matching_quality(df_matched) return self.results, self.quality_result, df_matched def validate_result( self, refuter: str = "random_feature", effect_type: str = "ate", n_sim: int = 10, fraction: float = 0.8 ) -> dict: """Validates estimated ATE (Average Treatment Effect). Validates estimated effect: 1) by replacing real treatment with random placebo treatment. Estimated effect must be droped to zero, p-val > 0.05; 2) by adding random feature (`random_feature`). Estimated effect shouldn't change significantly, p-val < 0.05; 3) estimates effect on subset of data (default fraction is 0.8). Estimated effect shouldn't change significantly, p-val < 0.05. Args: refuter: Refuter type (`random_treatment`, `random_feature`, `subset_refuter`) effect_type: Which effect to validate (`ate`, `att`, `atc`) n_sim: Number of simulations fraction: Subset fraction for subset refuter only Returns: Dictionary of outcome_name (mean_effect on validation, p-value) """ if self.silent: logger.debug("Applying validation of result") else: logger.info("Applying validation of result") self.val_dict = {k: [] for k in self.outcomes} self.pval_dict = dict() effect_dict = {"ate": 0, "atc": 1, "att": 2} assert effect_type in effect_dict.keys() for i in tqdm(range(n_sim)): if refuter in ["random_treatment", "random_feature"]: if refuter == "random_treatment": self.input_data, orig_treatment, self.validate = random_treatment(self.input_data, self.treatment) elif refuter == "random_feature": self.input_data, self.validate = random_feature(self.input_data) if self.features_importance is not None and i == 0: self.features_importance.append("random_feature") self.matcher = FaissMatcher( self.input_data, self.outcomes, self.treatment, info_col=self.info_col, features=self.features_importance, group_col=self.group_col, validation=self.validate, n_neighbors=self.n_neighbors, pbar=False, ) elif refuter == "subset_refuter": df, self.validate = subset_refuter(self.input_data, self.treatment, fraction) self.matcher = FaissMatcher( df, self.outcomes, self.treatment, info_col=self.info_col, features=self.features_importance, group_col=self.group_col, validation=self.validate, n_neighbors=self.n_neighbors, pbar=False, ) else: logger.error("Incorrect refuter name") raise NameError( "Incorrect refuter name! 
Available refuters: `random_feature`, `random_treatment`, `subset_refuter`" ) if self.group_col is None: sim = self.matcher.match() else: sim = self.matcher.group_match() for key in self.val_dict.keys(): self.val_dict[key].append(sim[key][0]) for outcome in self.outcomes: self.pval_dict.update({outcome: [np.mean(self.val_dict[outcome])]}) self.pval_dict[outcome].append(
test_significance(
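The `next_line` above passes the simulated effects to a `test_significance` helper whose internals are not shown in this record. The check that the docstring describes (placebo effects should be statistically indistinguishable from zero) can be illustrated with a plain one-sample t-test; the values below are made up and this is not the library's implementation:

# Illustrative significance check on simulated placebo effects (hypothetical data,
# not the library's test_significance helper).
import numpy as np
from scipy import stats

simulated_effects = np.array([0.03, -0.05, 0.01, 0.04, -0.02, 0.00, 0.02, -0.03, 0.01, -0.01])
t_stat, p_value = stats.ttest_1samp(simulated_effects, popmean=0.0)
print(f"mean placebo effect {simulated_effects.mean():.3f}, p-value {p_value:.3f}")
# A p-value > 0.05 means the placebo effect is consistent with zero, as the refuter expects.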
10
2023-11-01 08:58:57+00:00
16k
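`match_no_rep` in the record above describes matching without replacement as minimizing the linear sum of distances between pairs of treatment and control samples. A rough standalone sketch of that idea, solving the pairing as an assignment problem on made-up data (this is not the `MatcherNoReplacement` implementation):

# Sketch: one-to-one treated/control pairing that minimizes total distance.
# Covariates X and treatment indicator a are hypothetical.
import numpy as np
from scipy.optimize import linear_sum_assignment
from scipy.spatial.distance import cdist

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 5))                 # covariate matrix
a = rng.integers(0, 2, size=100)              # treatment indicator

treated, control = X[a == 1], X[a == 0]
cost = cdist(treated, control)                # pairwise distances between the two groups
rows, cols = linear_sum_assignment(cost)      # optimal assignment, each control used at most once

pairs = list(zip(np.where(a == 1)[0][rows], np.where(a == 0)[0][cols]))
print(f"matched {len(pairs)} pairs, total distance {cost[rows, cols].sum():.2f}")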
tianhaowuhz/human-assisting-dex-grasp
Algorithms/ppo/gf_ppo_update.py
[ { "identifier": "RolloutStorage", "path": "Algorithms/ppo/storage.py", "snippet": "class RolloutStorage:\n\n def __init__(self, num_envs, num_transitions_per_env, obs_shape, states_shape, actions_shape, device='cpu', sampler='sequential'):\n\n self.device = device\n self.sampler = sampler\n print(self.sampler)\n\n # Core\n self.observations = torch.zeros(num_transitions_per_env, num_envs, *obs_shape, device=self.device)\n self.states = torch.zeros(num_transitions_per_env, num_envs, *states_shape, device=self.device)\n self.rewards = torch.zeros(num_transitions_per_env, num_envs, 1, device=self.device)\n self.actions = torch.zeros(num_transitions_per_env, num_envs, *actions_shape, device=self.device)\n self.dones = torch.zeros(num_transitions_per_env, num_envs, 1, device=self.device).byte()\n\n # For PPO\n self.actions_log_prob = torch.zeros(num_transitions_per_env, num_envs, 1, device=self.device)\n self.values = torch.zeros(num_transitions_per_env, num_envs, 1, device=self.device)\n self.returns = torch.zeros(num_transitions_per_env, num_envs, 1, device=self.device)\n self.advantages = torch.zeros(num_transitions_per_env, num_envs, 1, device=self.device)\n self.mu = torch.zeros(num_transitions_per_env, num_envs, *actions_shape, device=self.device)\n self.sigma = torch.zeros(num_transitions_per_env, num_envs, *actions_shape, device=self.device)\n\n self.num_transitions_per_env = num_transitions_per_env\n self.num_envs = num_envs\n\n self.step = 0\n\n def add_transitions(self, observations, states, actions, rewards, dones, values, actions_log_prob, mu, sigma):\n if self.step >= self.num_transitions_per_env:\n raise AssertionError(\"Rollout buffer overflow\")\n\n self.observations[self.step].copy_(observations)\n self.states[self.step].copy_(states)\n self.actions[self.step].copy_(actions)\n self.rewards[self.step].copy_(rewards.view(-1, 1))\n self.dones[self.step].copy_(dones.view(-1, 1))\n self.values[self.step].copy_(values)\n self.actions_log_prob[self.step].copy_(actions_log_prob.view(-1, 1))\n self.mu[self.step].copy_(mu)\n self.sigma[self.step].copy_(sigma)\n\n self.step += 1\n\n def clear(self):\n self.step = 0\n\n def compute_returns(self, last_values, gamma, lam):\n advantage = 0\n for step in reversed(range(self.num_transitions_per_env)):\n if step == self.num_transitions_per_env - 1:\n next_values = last_values\n else:\n next_values = self.values[step + 1]\n next_is_not_terminal = 1.0 - self.dones[step].float()\n delta = self.rewards[step] + next_is_not_terminal * gamma * next_values - self.values[step]\n advantage = delta + next_is_not_terminal * gamma * lam * advantage\n self.returns[step] = advantage + self.values[step]\n\n # Compute and normalize the advantages\n self.advantages = self.returns - self.values\n self.advantages = (self.advantages - self.advantages.mean()) / (self.advantages.std() + 1e-8)\n\n def get_statistics(self):\n done = self.dones.cpu()\n done[-1] = 1\n flat_dones = done.permute(1, 0, 2).reshape(-1, 1)\n done_indices = torch.cat((flat_dones.new_tensor([-1], dtype=torch.int64), flat_dones.nonzero(as_tuple=False)[:, 0]))\n trajectory_lengths = (done_indices[1:] - done_indices[:-1])\n return trajectory_lengths.float().mean(), self.rewards.mean()\n\n def mini_batch_generator(self, num_mini_batches):\n batch_size = self.num_envs * self.num_transitions_per_env\n mini_batch_size = int(batch_size // num_mini_batches)\n # set_trace()\n # print(mini_batch_size)\n if self.sampler == \"sequential\":\n # For physics-based RL, each environment is already randomized. 
There is no value to doing random sampling\n # but a lot of CPU overhead during the PPO process. So, we can just switch to a sequential sampler instead\n subset = SequentialSampler(range(batch_size))\n elif self.sampler == \"random\":\n subset = SubsetRandomSampler(range(batch_size))\n\n batch = BatchSampler(subset, mini_batch_size, drop_last=True)\n return batch" }, { "identifier": "ActorCritic", "path": "Algorithms/ppo/module.py", "snippet": "class ActorCritic(nn.Module):\n\n def __init__(self, obs_shape, states_shape, actions_shape, initial_std, model_cfg, asymmetric=False, state_base=False, stack_frame_number=3, sub_obs_type=None, num_fingertip=None, pointnet_type='pt2', envs=None, hand_pcl=False, hand_model=None, args=None):\n super(ActorCritic, self).__init__()\n\n # network parameter\n self.asymmetric = asymmetric\n self.state_base = state_base\n self.stack_frame_number = stack_frame_number\n self.sub_obs_type = sub_obs_type\n self.num_fingertip = num_fingertip\n self.disentangle_hand = model_cfg['distengle']\n self.norm_action = model_cfg['norm_action']\n self.action_scale = model_cfg['action_scale']\n self.pointnet_type = pointnet_type\n self.envs = envs\n self.hand_pcl = hand_pcl\n self.hand_model = hand_model\n \n '''\n init network: current we set self.state_base = False, only set true for pure state input\n '''\n if not self.state_base:\n # get model cfg\n if model_cfg is None:\n self.hand_joint_dim = 18\n self.hand_wrist_dim = 7 * self.stack_frame_number\n actor_hidden_dim = 256\n critic_hidden_dim = 256\n activation = get_activation(\"selu\")\n self.shared_pointnet = True\n self.points_per_object = 1024\n else:\n # get input dim\n self.hand_joint_dim = model_cfg['hand_joint_dim']\n self.hand_wrist_dim = model_cfg['hand_wrist_dim'] * self.stack_frame_number\n\n # fingertip obs dim\n if \"fingertipjoint\" in self.sub_obs_type:\n self.fingertip_dim = self.num_fingertip-1\n else:\n self.fingertip_dim = 0\n\n if \"disfingertip\" in self.sub_obs_type:\n self.fingertip_dim += self.num_fingertip*1\n elif \"absfingertip\" in self.sub_obs_type:\n self.fingertip_dim += self.num_fingertip*3\n\n # obj pose obs dim\n if \"objpose\" in self.sub_obs_type:\n self.objpose_dim = 7\n else:\n self.objpose_dim = 0\n \n # diso2o obs dim\n if \"diso2o\" in self.sub_obs_type:\n self.diso2o_dim = 1\n else:\n self.diso2o_dim = 0\n \n # goal obs dim\n if \"goal\" in self.sub_obs_type:\n self.goal_dim = 18\n else:\n self.goal_dim = 0\n \n # gf obs dim\n if 'gf' in self.sub_obs_type:\n self.gf_dim = actions_shape[0]\n else:\n self.gf_dim = 0\n\n # network parameter\n actor_hidden_dim = model_cfg['pi_hid_sizes']\n critic_hidden_dim = model_cfg['vf_hid_sizes']\n activation = get_activation(model_cfg['activation'])\n self.shared_pointnet = model_cfg['shared_pointnet']\n self.points_per_object = model_cfg['points_per_object']\n\n self.action_dim = actions_shape[0]\n\n '''\n actor layer\n '''\n # state encoder\n if self.disentangle_hand:\n self.actor_hand_joint_global_enc = nn.Sequential(\n nn.Linear(self.hand_joint_dim + self.fingertip_dim + self.objpose_dim + self.diso2o_dim + self.goal_dim, actor_hidden_dim),\n activation,\n )\n \n self.actor_hand_wrist_global_enc = nn.Sequential(\n nn.Linear(self.hand_wrist_dim, actor_hidden_dim),\n activation,\n )\n \n if 'gf' in self.sub_obs_type:\n self.actor_grad_enc = nn.Sequential(\n nn.Linear(*actions_shape, actor_hidden_dim),\n activation,\n )\n else:\n self.state_dim = self.hand_joint_dim + self.hand_wrist_dim + self.fingertip_dim + self.objpose_dim + 
self.diso2o_dim + self.goal_dim + self.gf_dim\n self.actor_hand_global_enc = nn.Sequential(\n nn.Linear(self.state_dim, actor_hidden_dim),\n activation,\n nn.Linear(actor_hidden_dim, actor_hidden_dim),\n activation,\n nn.Linear(actor_hidden_dim, actor_hidden_dim),\n activation,\n )\n\n # pointcloud feature encoder\n self.actor_obj_global_enc = nn.Sequential(\n nn.Linear(self.points_per_object, actor_hidden_dim),\n activation,\n )\n\n # mlp output\n if self.disentangle_hand:\n if 'gf' in self.sub_obs_type:\n total_feat_num = 2 + 1 + 1\n else:\n total_feat_num = 2 + 1\n else:\n total_feat_num = 1 + 1\n\n if self.disentangle_hand:\n self.actor_mlp1 = nn.Sequential(\n nn.Linear(actor_hidden_dim*total_feat_num, actor_hidden_dim),\n activation,\n )\n else:\n self.actor_mlp1 = nn.Sequential(\n nn.Linear(actor_hidden_dim*total_feat_num, actor_hidden_dim),\n activation,\n nn.Linear(actor_hidden_dim, actor_hidden_dim),\n activation,\n )\n\n # norm output action\n if self.norm_action:\n self.actor_mlp2 = nn.Sequential(\n nn.Linear(actor_hidden_dim, *actions_shape),\n get_activation(\"tanh\"),\n )\n else:\n self.actor_mlp2 = nn.Sequential(\n nn.Linear(actor_hidden_dim, *actions_shape),\n )\n\n '''\n critic layer\n '''\n # state encoder\n if self.disentangle_hand:\n self.critic_hand_joint_global_enc = nn.Sequential(\n nn.Linear(self.hand_joint_dim + self.fingertip_dim + self.objpose_dim + self.diso2o_dim + self.goal_dim, critic_hidden_dim),\n activation,\n )\n \n self.critic_hand_wrist_global_enc = nn.Sequential(\n nn.Linear(self.hand_wrist_dim, critic_hidden_dim),\n activation,\n )\n \n if 'gf' in self.sub_obs_type:\n self.critic_grad_enc = nn.Sequential(\n nn.Linear(*actions_shape, critic_hidden_dim),\n activation,\n )\n else:\n self.state_dim = self.hand_joint_dim + self.hand_wrist_dim + self.fingertip_dim + self.objpose_dim + self.diso2o_dim + self.goal_dim + self.gf_dim\n self.critic_hand_global_enc = nn.Sequential(\n nn.Linear(self.state_dim, critic_hidden_dim),\n activation,\n nn.Linear(critic_hidden_dim, critic_hidden_dim),\n activation,\n nn.Linear(critic_hidden_dim, critic_hidden_dim),\n activation,\n )\n\n # pointcloud feature encoder\n self.critic_obj_global_enc = nn.Sequential(\n nn.Linear(self.points_per_object, critic_hidden_dim),\n activation,\n )\n\n # mlp output\n if self.disentangle_hand:\n self.critic_mlp1 = nn.Sequential(\n nn.Linear(critic_hidden_dim*total_feat_num, critic_hidden_dim),\n activation,\n )\n\n if args.exp_name == 'ilad':\n self.additional_critic_mlp1 = nn.Sequential(\n nn.Linear(critic_hidden_dim*total_feat_num + self.action_dim, critic_hidden_dim),\n activation,\n )\n else:\n self.critic_mlp1 = nn.Sequential(\n nn.Linear(critic_hidden_dim*total_feat_num, critic_hidden_dim),\n activation,\n nn.Linear(critic_hidden_dim, critic_hidden_dim),\n activation,\n )\n\n if args.exp_name == 'ilad':\n self.additional_critic_mlp1 = nn.Sequential(\n nn.Linear(critic_hidden_dim*total_feat_num + self.action_dim, critic_hidden_dim),\n activation,\n nn.Linear(critic_hidden_dim, 1),\n )\n self.critic_mlp2 = nn.Sequential(\n nn.Linear(critic_hidden_dim, 1),\n )\n\n '''\n shared layer\n '''\n if self.shared_pointnet:\n if self.pointnet_type == 'pt':\n self.pointnet_enc = PointNetEncoder()\n elif self.pointnet_type == 'pt2':\n self.pointnet_enc = Pointnet2Backbone() # for pointnet2\n else:\n if self.pointnet_type == 'pt':\n self.actor_pointnet_enc = PointNetEncoder()\n self.critic_pointnet_enc = PointNetEncoder()\n elif self.pointnet_type == 'pt2':\n self.actor_pointnet_enc = 
Pointnet2Backbone() # for pointnet2\n self.critic_pointnet_enc = Pointnet2Backbone() # for pointnet2\n\n # Action noise\n self.log_std = nn.Parameter(np.log(initial_std) * torch.ones(*actions_shape))\n else:\n # get model config\n if model_cfg is None:\n actor_hidden_dim = [256, 256, 256]\n critic_hidden_dim = [256, 256, 256]\n activation = get_activation(\"selu\")\n else:\n if local:\n actor_hidden_dim = [256, 256, 256]\n critic_hidden_dim = [256, 256, 256]\n activation = get_activation(\"selu\")\n else:\n actor_hidden_dim = model_cfg['pi_hid_sizes']\n critic_hidden_dim = model_cfg['vf_hid_sizes']\n activation = get_activation(model_cfg['activation'])\n\n # Policy\n actor_layers = []\n actor_layers.append(nn.Linear(*obs_shape, actor_hidden_dim[0]))\n actor_layers.append(activation)\n for l in range(len(actor_hidden_dim)):\n if l == len(actor_hidden_dim) - 1:\n actor_layers.append(nn.Linear(actor_hidden_dim[l], *actions_shape))\n else:\n actor_layers.append(nn.Linear(actor_hidden_dim[l], actor_hidden_dim[l + 1]))\n actor_layers.append(activation)\n self.actor = nn.Sequential(*actor_layers)\n\n # Value function\n critic_layers = []\n if self.asymmetric:\n critic_layers.append(nn.Linear(*states_shape, critic_hidden_dim[0]))\n else:\n critic_layers.append(nn.Linear(*obs_shape, critic_hidden_dim[0]))\n critic_layers.append(activation)\n for l in range(len(critic_hidden_dim)):\n if l == len(critic_hidden_dim) - 1:\n critic_layers.append(nn.Linear(critic_hidden_dim[l], 1))\n else:\n critic_layers.append(nn.Linear(critic_hidden_dim[l], critic_hidden_dim[l + 1]))\n critic_layers.append(activation)\n self.critic = nn.Sequential(*critic_layers)\n\n print(self.actor)\n print(self.critic)\n\n # Action noise\n self.log_std = nn.Parameter(np.log(initial_std) * torch.ones(*actions_shape))\n\n # Initialize the weights like in stable baselines\n actor_weights = [np.sqrt(2)] * len(actor_hidden_dim)\n actor_weights.append(0.01)\n critic_weights = [np.sqrt(2)] * len(critic_hidden_dim)\n critic_weights.append(1.0)\n self.init_weights(self.actor, actor_weights)\n self.init_weights(self.critic, critic_weights)\n \n @staticmethod\n def init_weights(sequential, scales):\n [torch.nn.init.orthogonal_(module.weight, gain=scales[idx]) for idx, module in\n enumerate(mod for mod in sequential if isinstance(mod, nn.Linear))]\n\n def forward(self):\n raise NotImplementedError\n \n def forward_actor(self, observations):\n '''\n process observation\n '''\n batch_size = observations.size(0)\n \n if self.disentangle_hand:\n hand_joint_batch, hand_wrist_batch, grad_batch, obj_batch = self.process_observations(observations=observations)\n else:\n state_batch, obj_batch = self.process_observations(observations=observations)\n \n '''\n forward\n '''\n # pointcloud encoder\n if self.shared_pointnet:\n if self.pointnet_type == 'pt':\n obj_feat, _, _ = self.pointnet_enc(obj_batch.reshape(batch_size,-1,3).permute(0,2,1))\n elif self.pointnet_type == 'pt2':\n obj_feat, _ = self.pointnet_enc(obj_batch.reshape(batch_size,-1,3))\n else:\n if self.pointnet_type == 'pt':\n obj_feat, _, _ = self.actor_pointnet_enc(obj_batch.reshape(batch_size,-1,3).permute(0,2,1))\n elif self.pointnet_type == 'pt2':\n obj_feat, _ = self.actor_pointnet_enc(obj_batch.reshape(batch_size,-1,3))\n obj_feat = self.actor_obj_global_enc(obj_feat.reshape(batch_size,-1)) # B x 512\n\n # state encoder\n if self.disentangle_hand:\n hand_joint_global_feat = self.actor_hand_joint_global_enc(hand_joint_batch) # B x 512\n hand_wrist_global_feat = 
self.actor_hand_wrist_global_enc(hand_wrist_batch) # B x 512\n hand_global_feat = torch.cat([hand_wrist_global_feat, hand_joint_global_feat],-1) \n\n if 'gf' in self.sub_obs_type:\n grad_feat = self.actor_grad_enc(grad_batch) # B x 512\n total_feat = torch.cat([hand_global_feat, obj_feat, grad_feat],-1)\n else:\n total_feat = torch.cat([hand_global_feat, obj_feat],-1)\n else:\n hand_global_feat = self.actor_hand_global_enc(state_batch)\n total_feat = torch.cat([hand_global_feat, obj_feat],-1)\n\n # mlp\n x = self.actor_mlp1(total_feat)\n x = self.actor_mlp2(x)*self.action_scale\n return x\n \n def forward_critic(self, observations):\n \"\"\"\n process observation\n \"\"\"\n batch_size = observations.size(0)\n\n if self.disentangle_hand:\n hand_joint_batch, hand_wrist_batch, grad_batch, obj_batch = self.process_observations(observations=observations)\n else:\n state_batch, obj_batch = self.process_observations(observations=observations)\n\n '''\n forward\n '''\n # point cloud encoder\n if self.shared_pointnet:\n if self.pointnet_type == 'pt':\n obj_feat, _, _ = self.pointnet_enc(obj_batch.reshape(batch_size,-1,3).permute(0,2,1))\n elif self.pointnet_type == 'pt2':\n obj_feat, _ = self.pointnet_enc(obj_batch.reshape(batch_size,-1,3))\n else:\n if self.pointnet_type == 'pt':\n obj_feat, _, _ = self.critic_pointnet_enc(obj_batch.reshape(batch_size,-1,3).permute(0,2,1))\n elif self.pointnet_type == 'pt2':\n obj_feat, _ = self.critic_pointnet_enc(obj_batch.reshape(batch_size,-1,3))\n obj_feat = self.critic_obj_global_enc(obj_feat.reshape(batch_size,-1)) # B x 512\n \n # state encoder\n if self.disentangle_hand:\n hand_joint_global_feat = self.critic_hand_joint_global_enc(hand_joint_batch) # B x 512\n hand_wrist_global_feat = self.critic_hand_wrist_global_enc(hand_wrist_batch) # B x 512\n hand_global_feat = torch.cat([hand_wrist_global_feat, hand_joint_global_feat],-1)\n\n if 'gf' in self.sub_obs_type:\n grad_feat = self.critic_grad_enc(grad_batch) # B x 512\n total_feat = torch.cat([hand_global_feat, obj_feat, grad_feat],-1)\n else:\n total_feat = torch.cat([hand_global_feat, obj_feat],-1)\n else:\n hand_global_feat = self.critic_hand_global_enc(state_batch)\n \n total_feat = torch.cat([hand_global_feat, obj_feat],-1)\n \n # mlp\n x = self.critic_mlp1(total_feat)\n x = self.critic_mlp2(x)\n return x\n\n def forward_additional_critic(self, observations, actions):\n \"\"\"\n process observation\n \"\"\"\n batch_size = observations.size(0)\n\n if self.disentangle_hand:\n hand_joint_batch, hand_wrist_batch, grad_batch, obj_batch = self.process_observations(observations=observations)\n else:\n state_batch, obj_batch = self.process_observations(observations=observations)\n\n '''\n forward\n '''\n # point cloud encoder\n if self.shared_pointnet:\n if self.pointnet_type == 'pt':\n obj_feat, _, _ = self.pointnet_enc(obj_batch.reshape(batch_size,-1,3).permute(0,2,1))\n elif self.pointnet_type == 'pt2':\n obj_feat, _ = self.pointnet_enc(obj_batch.reshape(batch_size,-1,3))\n else:\n if self.pointnet_type == 'pt':\n obj_feat, _, _ = self.critic_pointnet_enc(obj_batch.reshape(batch_size,-1,3).permute(0,2,1))\n elif self.pointnet_type == 'pt2':\n obj_feat, _ = self.critic_pointnet_enc(obj_batch.reshape(batch_size,-1,3))\n obj_feat = self.critic_obj_global_enc(obj_feat.reshape(batch_size,-1)) # B x 512\n \n # state encoder\n if self.disentangle_hand:\n hand_joint_global_feat = self.critic_hand_joint_global_enc(hand_joint_batch) # B x 512\n hand_wrist_global_feat = 
self.critic_hand_wrist_global_enc(hand_wrist_batch) # B x 512\n hand_global_feat = torch.cat([hand_wrist_global_feat, hand_joint_global_feat],-1)\n\n if 'gf' in self.sub_obs_type:\n grad_feat = self.critic_grad_enc(grad_batch) # B x 512\n total_feat = torch.cat([hand_global_feat, obj_feat, grad_feat],-1)\n else:\n total_feat = torch.cat([hand_global_feat, obj_feat],-1)\n else:\n hand_global_feat = self.critic_hand_global_enc(state_batch)\n \n total_feat = torch.cat([hand_global_feat, obj_feat],-1)\n \n # mlp\n total_feat = torch.concat([total_feat, actions], -1)\n x = self.additional_critic_mlp1(total_feat)\n return x\n\n def process_observations(self,observations):\n '''\n get all obs batch\n '''\n hand_joint_batch = observations[:,:self.hand_joint_dim] # B x 18\n hand_wrist_batch = observations[:,self.hand_joint_dim:self.hand_joint_dim+self.hand_wrist_dim] # B x 7 * sfn\n fingertip_idx = self.hand_joint_dim+self.hand_wrist_dim+self.points_per_object*3\n obj_batch = observations[:,self.hand_joint_dim+self.hand_wrist_dim:fingertip_idx] # B x 1024*3\n\n if self.hand_pcl:\n hand_pos_2_w = hand_wrist_batch[:,:3].clone()\n hand_quat_2_w = hand_wrist_batch[:,3:7].clone()\n hand_pos_2_h, hand_quat_2_h = self.envs.transform_target2source(hand_quat_2_w, hand_pos_2_w, hand_quat_2_w, hand_pos_2_w)\n\n ori_hand_dof = self.envs.dof_norm(hand_joint_batch.clone(),inv=True)\n hand_pcl_2h = self.hand_model.get_hand_pcl(hand_pos=hand_pos_2_h, hand_quat=hand_quat_2_h, hand_dof=ori_hand_dof)\n obj_batch = torch.cat([obj_batch, hand_pcl_2h.reshape(hand_pcl_2h.size(0),-1)],1)\n\n if \"fingertipjoint\" in self.sub_obs_type:\n fingertipjoint_batch = observations[:,fingertip_idx:fingertip_idx+self.num_fingertip-1]\n fingertip_idx = fingertip_idx+self.num_fingertip-1\n\n if \"disfingertip\" in self.sub_obs_type:\n fingertip_batch = observations[:,fingertip_idx:fingertip_idx+self.num_fingertip]\n objpose_idx = fingertip_idx+self.num_fingertip\n fingertip_batch = torch.cat([fingertipjoint_batch,fingertip_batch],-1)\n elif \"absfingertip\" in self.sub_obs_type:\n fingertip_batch = observations[:,fingertip_idx:fingertip_idx+self.num_fingertip*3]\n objpose_idx = fingertip_idx+self.num_fingertip*3\n fingertip_batch = torch.cat([fingertipjoint_batch,fingertip_batch],-1)\n else:\n objpose_idx = fingertip_idx \n fingertip_batch = fingertipjoint_batch\n else:\n objpose_idx = fingertip_idx \n\n if \"objpose\" in self.sub_obs_type:\n objpose_batch = observations[:,objpose_idx:objpose_idx+7]\n diso2o_idx = objpose_idx+7\n else:\n diso2o_idx = objpose_idx\n\n if \"diso2o\" in self.sub_obs_type:\n diso2o_batch = observations[:,diso2o_idx:diso2o_idx+1]\n goal_idx = diso2o_idx + 1\n else:\n goal_idx = diso2o_idx\n\n if \"goal\" in self.sub_obs_type:\n goal_batch = observations[:,goal_idx:goal_idx+18]\n\n if 'gf' in self.sub_obs_type:\n grad_batch = observations[:,-self.action_dim:] # B x 18\n else:\n grad_batch = None\n\n if self.disentangle_hand:\n if \"fingertip\" in self.sub_obs_type:\n hand_joint_batch = torch.cat([hand_joint_batch, fingertip_batch], -1)\n if \"objpose\" in self.sub_obs_type:\n hand_joint_batch = torch.cat([hand_joint_batch, objpose_batch], -1)\n if \"diso2o\" in self.sub_obs_type:\n hand_joint_batch = torch.cat([hand_joint_batch, diso2o_batch], -1)\n if \"goal\" in self.sub_obs_type:\n hand_joint_batch = torch.cat([hand_joint_batch, goal_batch], -1)\n \n return hand_joint_batch, hand_wrist_batch, grad_batch, obj_batch\n else:\n state_batch = torch.cat([hand_wrist_batch,hand_joint_batch],-1)\n if \"fingertip\" 
in self.sub_obs_type:\n state_batch = torch.cat([state_batch, fingertip_batch],-1)\n \n if \"objpose\" in self.sub_obs_type:\n state_batch = torch.cat([state_batch, objpose_batch], -1)\n \n if \"diso2o\" in self.sub_obs_type:\n state_batch = torch.cat([state_batch, diso2o_batch], -1)\n \n if \"goal\" in self.sub_obs_type:\n state_batch = torch.cat([state_batch, goal_batch],-1)\n\n if 'gf' in self.sub_obs_type:\n state_batch = torch.cat([state_batch, grad_batch], -1)\n \n return state_batch, obj_batch\n \n def act(self, observations, states):\n if self.state_base:\n actions_mean = self.actor(observations)\n else:\n actions_mean = self.forward_actor(observations)\n\n # print(self.log_std)\n covariance = torch.diag(self.log_std.exp() * self.log_std.exp())\n distribution = MultivariateNormal(actions_mean, scale_tril=covariance)\n\n actions = distribution.sample()\n actions_log_prob = distribution.log_prob(actions)\n\n if self.asymmetric:\n value = self.critic(states)\n else:\n if self.state_base:\n value = self.critic(observations)\n else:\n value = self.forward_critic(observations)\n\n return actions.detach(), actions_log_prob.detach(), value.detach(), actions_mean.detach(), self.log_std.repeat(actions_mean.shape[0], 1).detach()\n\n def cal_actions_log_prob(self,observations, actions):\n if self.state_base:\n actions_mean = self.actor(observations)\n else:\n actions_mean = self.forward_actor(observations)\n\n covariance = torch.diag(self.log_std.exp() * self.log_std.exp())\n distribution = MultivariateNormal(actions_mean, scale_tril=covariance)\n\n actions_log_prob = distribution.log_prob(actions)\n return actions.detach(), actions_log_prob.detach(), actions_mean.detach()\n \n def act_inference(self, observations):\n if self.state_base:\n actions_mean = self.actor(observations)\n else:\n actions_mean = self.forward_actor(observations)\n return actions_mean\n\n def evaluate(self, observations, states, actions):\n if self.state_base:\n actions_mean = self.actor(observations)\n else:\n actions_mean = self.forward_actor(observations)\n\n covariance = torch.diag(self.log_std.exp() * self.log_std.exp())\n distribution = MultivariateNormal(actions_mean, scale_tril=covariance)\n\n actions_log_prob = distribution.log_prob(actions)\n entropy = distribution.entropy()\n\n if self.asymmetric:\n value = self.critic(states)\n else:\n if self.state_base:\n value = self.critic(observations)\n else:\n value = self.forward_critic(observations)\n\n return actions_log_prob, entropy, value, actions_mean, self.log_std.repeat(actions_mean.shape[0], 1)" }, { "identifier": "loss_fn_cond", "path": "Algorithms/SDE_update.py", "snippet": "def loss_fn_cond(model, x, marginal_prob_fn, sde_fn, is_likelihood_weighting=False, eps=1e-5, device='cuda:0', hand_pcl=False, full_state=None, envs=None, hand_model=None, space='euler', relative=True):\n \"\"\"\n is_likelihood_weighting = True, can potentially improve likelihood-estimation (e.g., for reward learning)\n \"\"\"\n hand_dof_batch, obj_pcl_batch = x\n if space == 'riemann':\n hand_dof_batch = action2grad(hand_dof_batch, relative=relative)\n batchsize = hand_dof_batch.shape[0]\n random_t = torch.rand(batchsize, device=device) * (1. 
- eps) + eps\n # random_t = torch.pow(10,-5*random_t) \n random_t = random_t.unsqueeze(-1)\n z = torch.randn_like(hand_dof_batch)\n mu, std = marginal_prob_fn(hand_dof_batch, random_t)\n perturbed_hand_dof_batch = mu + z * std\n\n if hand_pcl:\n if space == 'riemann':\n hand_dof = action2grad(perturbed_hand_dof_batch.clone(), relative=relative, inv=True)\n else:\n hand_dof = perturbed_hand_dof_batch.clone() \n hand_pos_2_w = full_state[:,18:21].clone().to(device).float()\n hand_quat_2_w = full_state[:,21:25].clone().to(device).float()\n hand_pos_2_h, hand_quat_2_h = envs.transform_target2source(hand_quat_2_w, hand_pos_2_w, hand_quat_2_w, hand_pos_2_w)\n\n ori_hand_dof = envs.dof_norm(hand_dof.clone(),inv=True)\n hand_pcl_2h = hand_model.get_hand_pcl(hand_pos=hand_pos_2_h, hand_quat=hand_quat_2_h, hand_dof=ori_hand_dof)\n obj_pcl_batch = torch.cat([obj_pcl_batch, hand_pcl_2h.reshape(hand_pcl_2h.size(0),hand_pcl_2h.size(2),hand_pcl_2h.size(1))],2)\n\n output = model((perturbed_hand_dof_batch.reshape(batchsize, -1, 1), obj_pcl_batch), random_t)\n\n total_loss = (output + z / std) ** 2\n if is_likelihood_weighting:\n _, diffusion_coeff = sde_fn(random_t)\n loss_weighting = diffusion_coeff ** 2\n node_l2 = torch.sum(total_loss, dim=-1) * loss_weighting\n else:\n loss_weighting = std ** 2\n node_l2 = torch.sum(total_loss * loss_weighting, dim=-1)\n loss_ = torch.mean(node_l2)\n return loss_" }, { "identifier": "cond_ode_sampler", "path": "Algorithms/SDE_update.py", "snippet": "def cond_ode_sampler(\n score_model,\n prior_fn,\n sde_fn,\n state,\n batch_size=64,\n atol=1e-5,\n rtol=1e-5,\n device='cuda',\n eps=1e-5,\n t0=1,\n num_steps=None,\n is_random=True,\n denoise=True, \n hand_pcl=False, \n full_state=None, \n envs=None, \n hand_model=None,\n space='euler',\n relative=True,\n):\n hand_dof_batch, obj_pcl_batch = state\n if space == 'riemann':\n hand_dof_batch = action2grad(hand_dof_batch, relative=relative)\n t0_ = torch.ones(batch_size, device=device)*t0\n\n if is_random:\n init_x = prior_fn(hand_dof_batch.shape).to(device) # normal distribution\n # init_x = torch.randn_like(hand_dof_batch, device=device) * marginal_prob_std(t0_)\n # init_x = -torch.ones_like(hand_dof_batch, device=device)\n # init_x = torch.tensor([ 0.0000, -0.7143, -1.0000, 0.0000, -0.7143, -1.0000, 0.0000, -0.7143,\n # -1.0000, -1.0000, 0.0000, -0.7143, -1.0000, 0.0000, -1.0000, 0.0000,\n # 0.0000, -1.0000,1,1,1,1,1,1,1], device=device).reshape(1,-1)[:,:hand_dof_batch.size(1)].expand_as(hand_dof_batch)\n else:\n batch_size = hand_dof_batch.size(0)\n init_x = hand_dof_batch\n \n # Create the latent code\n # init_x = torch.randn_like(hand_dof_batch, device=device) * marginal_prob_std(t0_)\n # !!! 
for dex hand only, set to same init state\n # init_x = hand_dof_batch\n shape = init_x.shape\n state_dim = shape[-1]\n\n def score_eval_wrapper(sample, time_steps):\n \"\"\"A wrapper of the score-based model for use by the ODE solver.\"\"\"\n with torch.no_grad():\n score = score_model(sample, time_steps)\n # return score.cpu().numpy().reshape((-1,))\n return score.cpu().numpy().reshape(-1)\n\n def ode_func(t, x):\n \"\"\"The ODE function for use by the ODE solver.\"\"\"\n x = torch.tensor(x.reshape(-1, state_dim)).to(device).float()\n time_steps = torch.ones(batch_size, device=device).unsqueeze(1) * t\n # if batch_size == 1:\n # time_steps = torch.ones(batch_size, device=device).unsqueeze(1) * t\n # else:\n # time_steps = torch.ones(batch_size, device=device) * t\n drift, diffusion = sde_fn(torch.tensor(t))\n drift = drift.cpu().numpy()\n diffusion = diffusion.cpu().numpy()\n if hand_pcl:\n hand_dof = x.clone() \n hand_pos_2_w = full_state[:,18:21].clone().to(device).float()\n hand_quat_2_w = full_state[:,21:25].clone().to(device).float()\n hand_pos_2_h, hand_quat_2_h = envs.transform_target2source(hand_quat_2_w, hand_pos_2_w, hand_quat_2_w, hand_pos_2_w)\n\n if space == 'riemann':\n hand_dof = action2grad(hand_dof.clone(), relative=relative, inv=True)\n else:\n hand_dof = perturbed_hand_dof_batch.clone() \n\n ori_hand_dof = envs.dof_norm(hand_dof.clone(),inv=True)\n hand_pcl_2h = hand_model.get_hand_pcl(hand_pos=hand_pos_2_h, hand_quat=hand_quat_2_h, hand_dof=ori_hand_dof)\n objhand_pcl_batch = torch.cat([obj_pcl_batch, hand_pcl_2h.reshape(hand_pcl_2h.size(0),hand_pcl_2h.size(2),hand_pcl_2h.size(1))],2)\n gradient = score_eval_wrapper((x, objhand_pcl_batch), time_steps)\n else:\n gradient = score_eval_wrapper((x, obj_pcl_batch), time_steps)\n # gradient[:6]*=100\n # gradient[6:30]*=10\n return drift - 0.5 * (diffusion**2) * gradient\n \n # Run the black-box ODE solver.\n t_eval = None\n if num_steps is not None:\n # num_steps, from t0 -> eps\n t_eval = np.linspace(t0, eps, num_steps)\n\n res = integrate.solve_ivp(ode_func, (t0, eps), init_x.reshape(-1).cpu().numpy(), rtol=rtol, atol=atol,\n method='RK45', t_eval=t_eval)\n # process, xs: [total_nodes*3, samples_num]\n # clamp for now TODO\n # xs = torch.clamp(torch.tensor(res.y, device=device).T, min=-1.0, max=1.0) \n xs = torch.tensor(res.y, device=device).T\n xs = xs.view(num_steps, hand_dof_batch.shape[0], -1)\n\n # result x: [total_nodes, 3]\n x = torch.clamp(torch.tensor(res.y[:, -1], device=device).reshape(shape), min=-1.0, max=1.0)\n # x = torch.tensor(res.y[:, -1], device=device).reshape(shape)\n\n # denoise, using the predictor step in P-C sampler\n if denoise:\n # Reverse diffusion predictor for denoising\n vec_eps = torch.ones((x.shape[0], 1), device=x.device) * eps\n drift, diffusion = sde_fn(vec_eps)\n grad = score_model((x.float(), obj_pcl_batch), vec_eps)\n drift = drift - diffusion ** 2 * grad # R-SDE\n mean_x = x + drift * ((1 - eps) / (1000 if num_steps is None else num_steps))\n x = mean_x\n \n if space=='riemann':\n xs = action2grad(xs, inv=True, relative=relative)\n x = action2grad(x, inv=True, relative=relative)\n \n return xs, x" }, { "identifier": "init_sde", "path": "Algorithms/SDE_update.py", "snippet": "def init_sde(sde_mode, min=0.1, max=10.0):\n # the SDE-related hyperparameters are copied from https://github.com/yang-song/score_sde_pytorch\n if sde_mode == 've':\n sigma_min = 0.01\n sigma_max = 90\n prior_fn = functools.partial(ve_prior, sigma_min=sigma_min, sigma_max=sigma_max)\n marginal_prob_fn = 
functools.partial(ve_marginal_prob, sigma_min=sigma_min, sigma_max=sigma_max)\n sde_fn = functools.partial(ve_sde, sigma_min=sigma_min, sigma_max=sigma_max)\n elif sde_mode == 'vp':\n beta_0 = min\n beta_1 = max\n print(beta_0, beta_1)\n prior_fn = functools.partial(vp_prior, beta_0=beta_0, beta_1=beta_1)\n marginal_prob_fn = functools.partial(vp_marginal_prob, beta_0=beta_0, beta_1=beta_1)\n sde_fn = functools.partial(vp_sde, beta_0=beta_0, beta_1=beta_1)\n elif sde_mode == 'subvp':\n beta_0 = 0.1\n beta_1 = 20\n prior_fn = functools.partial(subvp_prior, beta_0=beta_0, beta_1=beta_1)\n marginal_prob_fn = functools.partial(subvp_marginal_prob, beta_0=beta_0, beta_1=beta_1)\n sde_fn = functools.partial(subvp_sde, beta_0=beta_0, beta_1=beta_1)\n else:\n raise NotImplementedError\n return prior_fn, marginal_prob_fn, sde_fn" }, { "identifier": "CondScoreModel", "path": "Networks/SDENets_update.py", "snippet": "class CondScoreModel(nn.Module):\n def __init__(self, marginal_prob_func, hidden_dim, embed_dim, state_dim=1,\n mode='target', relative=False, pointnet_version='pt2', n_blocks=0, feature_dim_coff=1, space='euler'):\n super(CondScoreModel, self).__init__()\n self.marginal_prob_func = marginal_prob_func\n self.point_feat_dim = 1088\n hidden_dim = hidden_dim\n embed_dim = embed_dim\n self.embed_dim = embed_dim\n self.mode = mode\n self.pointnet_version = pointnet_version\n if relative:\n hand_state_dim = 18\n if space == 'riemann':\n hand_state_dim = 18+18\n else:\n hand_state_dim = 25\n if space == 'riemann':\n hand_state_dim = 25+18\n \n self.n_blocks = n_blocks\n self.hand_global_enc = nn.Sequential(\n nn.Linear(hand_state_dim, hidden_dim),\n nn.ReLU(False),\n nn.Linear(hidden_dim, hidden_dim),\n nn.ReLU(False),\n )\n # obj pcl feature encoder\n if pointnet_version == 'pt':\n self.obj_enc = PointNetEncoder(global_feat=True, feature_transform=False, channel=3) # for pointnet\n elif pointnet_version == 'pt2':\n self.obj_enc = Pointnet2Backbone(feature_dim_coff=feature_dim_coff) # for pointnet2\n # self.obj_enc = PointNetEncoder() # for pointnet2\n # self.obj_cat_embed = nn.Embedding(301,512)\n\n if self.n_blocks < 1:\n self.obj_global_enc = nn.Sequential(\n nn.Linear(1024, hidden_dim),\n nn.ReLU(),\n nn.Linear(hidden_dim, embed_dim),\n nn.ReLU(),\n )\n self.embed_sigma = nn.Sequential(GaussianFourierProjection(embed_dim=embed_dim),\n nn.Linear(embed_dim, embed_dim))\n\n if n_blocks < 1:\n self.init_enc = nn.Sequential(\n nn.Linear(state_dim, hidden_dim),\n nn.ReLU(),\n nn.Linear(hidden_dim, self.point_feat_dim),\n nn.ReLU(),\n )\n\n # cond_dim = hidden_dim*2 + embed_dim*2 # consider wall\n if self.mode == 'target':\n cond_dim = embed_dim\n \n # self.mhca = MHCA(num_heads=2, inp_dim=self.point_feat_dim, hid_dim=self.point_feat_dim)\n ''' main backbone '''\n # # mlp1\n self.mlp1_main = nn.Sequential(\n nn.Linear((hidden_dim + embed_dim*2), hidden_dim),\n nn.ReLU(),\n nn.Linear(hidden_dim, hidden_dim),\n )\n # # mlp2\n self.mlp2_main = nn.Sequential(\n nn.Linear(hidden_dim + embed_dim*2, hidden_dim),\n nn.ReLU(),\n nn.Linear(hidden_dim, hand_state_dim),\n )\n else:\n self.pre_dense_cond = nn.Linear(1024*feature_dim_coff, hidden_dim)\n self.pre_dense_t = nn.Linear(embed_dim, hidden_dim)\n # self.pre_gnorm = nn.GroupNorm(32, num_channels=hidden_dim)\n\n for idx in range(n_blocks):\n setattr(self, f'b{idx+1}_dense1', nn.Linear(hidden_dim, hidden_dim))\n setattr(self, f'b{idx+1}_dense1_t', nn.Linear(embed_dim, hidden_dim))\n setattr(self, f'b{idx+1}_dense1_cond', nn.Linear(hidden_dim, 
hidden_dim))\n # setattr(self, f'b{idx+1}_gnorm1', nn.GroupNorm(32, num_channels=hidden_dim))\n\n setattr(self, f'b{idx+1}_dense2', nn.Linear(hidden_dim, hidden_dim))\n setattr(self, f'b{idx+1}_dense2_t', nn.Linear(embed_dim, hidden_dim))\n setattr(self, f'b{idx+1}_dense2_cond', nn.Linear(hidden_dim, hidden_dim))\n # setattr(self, f'b{idx+1}_gnorm2', nn.GroupNorm(32, num_channels=hidden_dim))\n\n self.act = nn.ReLU(False)\n self.post_dense = nn.Linear(hidden_dim, hand_state_dim) \n\n def forward(self, batches, t, obj_feature=False):\n \"\"\"\n batches = hand_batch, obj_batch\n hand_batch: [bs, 25, 1]\n obj_batch: [bs, 3, 1024]\n t: [bs] !! not [bs, 1] !!\n \"\"\"\n hand_batch, obj_batch = batches\n batch_size = hand_batch.size(0)\n hand_dof = hand_batch.size(1)\n ''' get cond feat'''\n\n # sigma_feat: [num_nodes, embed_dim]\n sigma_feat = F.relu(self.embed_sigma(t.squeeze(-1)),inplace=False)\n\n # total_cond_feat: [num_nodes, hidden_dim*2+embed_dim*2]\n # obj_feat,_, _ = self.obj_enc(obj_batch.reshape(batch_size,-1,3)) # B x 1024\n\n ## no cuda pointnet2\n # obj_feat,_ = self.obj_enc(obj_batch) # B x 1024\n # obj_feat = self.obj_global_enc(obj_feat)\n if self.pointnet_version == 'pt':\n obj_feat,_,_ = self.obj_enc(obj_batch.reshape(batch_size,-1,3).permute(0,2,1)) # B x 1024\n elif self.pointnet_version == 'pt2':\n ## cuda pointnet2\n obj_feat,_ = self.obj_enc(obj_batch.reshape(batch_size,-1,3)) # B x 1024\n ## pointnet\n\n if obj_feature:\n obj_feat_fr = obj_feat.clone()\n\n if self.n_blocks < 1:\n ''' get init x feat '''\n hand_global_feat = self.hand_global_enc(hand_batch.reshape(batch_size,-1))\n obj_feat = self.obj_global_enc(obj_feat.reshape(batch_size,-1))\n\n # obj_feat = torch.arange(0,batch_size,device=hand_batch.device)\n # obj_feat = self.obj_cat_embed(obj_feat)\n if self.mode == 'target':\n total_cond_feat = torch.cat([sigma_feat, obj_feat], dim=-1) #\n # total_cond_feat = sigma_feat\n\n ''' main backbone of x '''\n x = torch.cat([hand_global_feat, total_cond_feat], -1)\n x = self.mlp1_main(x)\n x = torch.cat([x, total_cond_feat], -1)\n x = self.mlp2_main(x)\n else:\n obj_feat = obj_feat.reshape(batch_size,-1)\n obj_feat = self.pre_dense_cond(obj_feat)\n\n x = self.hand_global_enc(hand_batch.reshape(batch_size,-1))\n x = x + self.pre_dense_t(sigma_feat)\n x = x + obj_feat\n # x = self.pre_gnorm(x)\n x = self.act(x)\n \n for idx in range(self.n_blocks):\n x1 = getattr(self, f'b{idx+1}_dense1')(x)\n x1 = x1 + getattr(self, f'b{idx+1}_dense1_t')(sigma_feat)\n x1 = x1 + getattr(self, f'b{idx+1}_dense1_cond')(obj_feat)\n # x1 = getattr(self, f'b{idx+1}_gnorm1')(x1)\n x1 = self.act(x1)\n # dropout, maybe\n # x1 = self.dropout(x1)\n\n x2 = getattr(self, f'b{idx+1}_dense2')(x1)\n x2 = x2 + getattr(self, f'b{idx+1}_dense2_t')(sigma_feat)\n x2 = x2 + getattr(self, f'b{idx+1}_dense2_cond')(obj_feat)\n # x2 = getattr(self, f'b{idx+1}_gnorm2')(x2)\n x2 = self.act(x2)\n # dropout, maybe\n # x2 = self.dropout(x2)\n\n x = x + x2\n\n x = self.post_dense(x)\n # normalize the output\n \n _, std = self.marginal_prob_func(x, t) \n x = x / (std + 1e-7)\n if obj_feature:\n return x, obj_feat_fr\n else:\n return x" } ]
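`RolloutStorage.compute_returns` in the context above implements Generalized Advantage Estimation: it walks the rollout backwards, accumulating the TD residual with a `gamma * lam` discount, zeroing the bootstrap at episode ends, and finally normalizing the advantages. A minimal single-environment version of the same recursion, on plain numpy arrays with made-up inputs:

# Minimal GAE sketch mirroring the backward recursion in compute_returns.
import numpy as np

def gae_returns(rewards, values, dones, last_value, gamma=0.99, lam=0.95):
    T = len(rewards)
    advantages = np.zeros(T)
    adv = 0.0
    for t in reversed(range(T)):
        next_value = last_value if t == T - 1 else values[t + 1]
        not_terminal = 1.0 - float(dones[t])
        delta = rewards[t] + gamma * next_value * not_terminal - values[t]  # TD residual
        adv = delta + gamma * lam * not_terminal * adv                      # GAE recursion
        advantages[t] = adv
    returns = advantages + values                                           # value-loss targets
    advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-8)
    return returns, advantages

ret, adv = gae_returns(np.array([1.0, 0.0, 0.5]), np.array([0.9, 0.8, 0.7]),
                       dones=[0, 0, 1], last_value=0.0)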
from datetime import datetime
from gym.spaces import Space
from collections import deque
from torch.utils.data import Dataset, TensorDataset, DataLoader
from torch.utils.tensorboard import SummaryWriter
from Algorithms.ppo import RolloutStorage
from Algorithms.ppo import ActorCritic
from Algorithms.SDE_update import loss_fn_cond, cond_ode_sampler, init_sde
from Networks.SDENets_update import CondScoreModel
from tqdm import tqdm
from ipdb import set_trace
import os
import time
import functools
import numpy as np
import statistics
import glob
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import copy
import time
import pickle
import cv2
import matplotlib.pyplot as plt
import io
import _pickle as CPickle
13,390
def images_to_video(path, images, fps=10, size=(256,256), suffix='mp4'):
    path = path+f'.{suffix}'
    out = cv2.VideoWriter(filename=path, fourcc=cv2.VideoWriter_fourcc(*'mp4v'), fps=fps, frameSize=size, isColor=True)
    for item in images:
        out.write(item.astype(np.uint8))
    out.release()

def get_img_from_fig(fig, dpi=180):
    buf = io.BytesIO()
    fig.savefig(buf, format="png", dpi=dpi)
    buf.seek(0)
    img_arr = np.frombuffer(buf.getvalue(), dtype=np.uint8)
    buf.close()
    img = cv2.imdecode(img_arr, 1)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    return img

class GFPPO:
    def __init__(self,
                 vec_env,
                 cfg_train,
                 device='cpu',
                 sampler='sequential',
                 log_dir='run',
                 is_testing=False,
                 print_log=True,
                 apply_reset=False,
                 asymmetric=False,
                 args=None,
                 ):
        self.args = args
        '''
        PPO
        '''
        # PPO parameters
        if not isinstance(vec_env.observation_space, Space):
            raise TypeError("vec_env.observation_space must be a gym Space")
        if not isinstance(vec_env.state_space, Space):
            raise TypeError("vec_env.state_space must be a gym Space")
        if not isinstance(vec_env.action_space, Space):
            raise TypeError("vec_env.action_space must be a gym Space")
        self.observation_space = vec_env.observation_space
        self.action_space = vec_env.action_space
        self.state_space = vec_env.state_space
        self.cfg_train = copy.deepcopy(cfg_train)
        learn_cfg = self.cfg_train["learn"]
        self.device = device
        self.asymmetric = asymmetric
        self.desired_kl = learn_cfg.get("desired_kl", None)
        self.schedule = learn_cfg.get("schedule", "fixed")
        self.step_size = learn_cfg["optim_stepsize"]
        self.init_noise_std = learn_cfg.get("init_noise_std", 0.3)
        self.model_cfg = self.cfg_train["policy"]
        self.num_transitions_per_env=learn_cfg["nsteps"]
        self.learning_rate=learn_cfg["optim_stepsize"]

        self.clip_param = learn_cfg["cliprange"]
        self.num_learning_epochs = learn_cfg["noptepochs"]
        self.num_mini_batches = learn_cfg["nminibatches"]
        self.value_loss_coef = learn_cfg.get("value_loss_coef", 2.0)
        self.entropy_coef = learn_cfg["ent_coef"]
        self.gamma = learn_cfg["gamma"]
        self.lam = learn_cfg["lam"]
        self.max_grad_norm = learn_cfg.get("max_grad_norm", 2.0)
        self.use_clipped_value_loss = learn_cfg.get("use_clipped_value_loss", False)

        # policy type
        self.action_type = self.cfg_train["setting"]["action_type"]
        self.sub_action_type = self.cfg_train["setting"]["sub_action_type"]
        self.action_clip = self.cfg_train["setting"]["action_clip"]
        self.grad_process = self.cfg_train["setting"]["grad_process"]
        self.grad_scale = self.cfg_train["setting"]["grad_scale"]

        if self.action_type=='joint' and self.sub_action_type=='add+jointscale':
            action_space_shape = (18+18,)
        else:
            action_space_shape = self.action_space.shape
        print(f'action_space_shape:{action_space_shape}!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')

        self.vec_env = vec_env
        self.vec_env.grad_scale = self.grad_scale

        pointnet_version = self.cfg_train["policy"]["pointnet_version"]
        hand_pcl = self.cfg_train["policy"]["hand_pcl"]
        hand_model = None

        # PPO components
        self.stack_frame_numer = self.vec_env.stack_frame_numbers
        self.actor_critic = ActorCritic(self.observation_space.shape, self.state_space.shape, action_space_shape,
                                        self.init_noise_std, self.model_cfg, asymmetric=asymmetric,
                                        stack_frame_number=self.stack_frame_numer,
                                        sub_obs_type=self.vec_env.sub_obs_type,
                                        num_fingertip=self.vec_env.num_fingertips,
                                        pointnet_type=pointnet_version,
                                        envs=self.vec_env,
                                        hand_pcl=hand_pcl,
                                        hand_model=hand_model,
                                        args=args)

        # pointnet backbone
        self.pointnet_finetune = self.model_cfg['finetune_pointnet']
        self.finetune_pointnet_bz = 128
        if self.model_cfg['pretrain_pointnet']:
            if pointnet_version == 'pt2':
                pointnet_model_dict = torch.load(os.path.join(args.score_model_path,'pt2.pt'), map_location=self.device)
            elif pointnet_version == 'pt':
                pointnet_model_dict = torch.load(os.path.join(args.score_model_path,'pt.pt'), map_location=self.device)
            if self.model_cfg['shared_pointnet']:
                self.actor_critic.pointnet_enc.load_state_dict(pointnet_model_dict)
                if not self.model_cfg['finetune_pointnet']:
                    # freeze pointnet
                    for name,param in self.actor_critic.pointnet_enc.named_parameters():
                        param.requires_grad = False
            else:
                self.actor_critic.actor_pointnet_enc.load_state_dict(pointnet_model_dict)
                self.actor_critic.critic_pointnet_enc.load_state_dict(pointnet_model_dict)
                if not self.model_cfg['finetune_pointnet']:
                    # freeze pointnet
                    for name,param in self.actor_critic.actor_pointnet_enc.named_parameters():
                        param.requires_grad = False
                    for name,param in self.actor_critic.critic_pointnet_enc.named_parameters():
                        param.requires_grad = False

        self.actor_critic.to(self.device)
# gf part
save_video = False
img_size = 256
save_state = False

def images_to_video(path, images, fps=10, size=(256,256), suffix='mp4'):
    path = path+f'.{suffix}'
    out = cv2.VideoWriter(filename=path, fourcc=cv2.VideoWriter_fourcc(*'mp4v'), fps=fps, frameSize=size, isColor=True)
    for item in images:
        out.write(item.astype(np.uint8))
    out.release()

def get_img_from_fig(fig, dpi=180):
    buf = io.BytesIO()
    fig.savefig(buf, format="png", dpi=dpi)
    buf.seek(0)
    img_arr = np.frombuffer(buf.getvalue(), dtype=np.uint8)
    buf.close()
    img = cv2.imdecode(img_arr, 1)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    return img

class GFPPO:
    def __init__(self,
                 vec_env,
                 cfg_train,
                 device='cpu',
                 sampler='sequential',
                 log_dir='run',
                 is_testing=False,
                 print_log=True,
                 apply_reset=False,
                 asymmetric=False,
                 args=None,
                 ):
        self.args = args
        '''
        PPO
        '''
        # PPO parameters
        if not isinstance(vec_env.observation_space, Space):
            raise TypeError("vec_env.observation_space must be a gym Space")
        if not isinstance(vec_env.state_space, Space):
            raise TypeError("vec_env.state_space must be a gym Space")
        if not isinstance(vec_env.action_space, Space):
            raise TypeError("vec_env.action_space must be a gym Space")
        self.observation_space = vec_env.observation_space
        self.action_space = vec_env.action_space
        self.state_space = vec_env.state_space
        self.cfg_train = copy.deepcopy(cfg_train)
        learn_cfg = self.cfg_train["learn"]
        self.device = device
        self.asymmetric = asymmetric
        self.desired_kl = learn_cfg.get("desired_kl", None)
        self.schedule = learn_cfg.get("schedule", "fixed")
        self.step_size = learn_cfg["optim_stepsize"]
        self.init_noise_std = learn_cfg.get("init_noise_std", 0.3)
        self.model_cfg = self.cfg_train["policy"]
        self.num_transitions_per_env=learn_cfg["nsteps"]
        self.learning_rate=learn_cfg["optim_stepsize"]

        self.clip_param = learn_cfg["cliprange"]
        self.num_learning_epochs = learn_cfg["noptepochs"]
        self.num_mini_batches = learn_cfg["nminibatches"]
        self.value_loss_coef = learn_cfg.get("value_loss_coef", 2.0)
        self.entropy_coef = learn_cfg["ent_coef"]
        self.gamma = learn_cfg["gamma"]
        self.lam = learn_cfg["lam"]
        self.max_grad_norm = learn_cfg.get("max_grad_norm", 2.0)
        self.use_clipped_value_loss = learn_cfg.get("use_clipped_value_loss", False)

        # policy type
        self.action_type = self.cfg_train["setting"]["action_type"]
        self.sub_action_type = self.cfg_train["setting"]["sub_action_type"]
        self.action_clip = self.cfg_train["setting"]["action_clip"]
        self.grad_process = self.cfg_train["setting"]["grad_process"]
        self.grad_scale = self.cfg_train["setting"]["grad_scale"]

        if self.action_type=='joint' and self.sub_action_type=='add+jointscale':
            action_space_shape = (18+18,)
        else:
            action_space_shape = self.action_space.shape
        print(f'action_space_shape:{action_space_shape}!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')

        self.vec_env = vec_env
        self.vec_env.grad_scale = self.grad_scale

        pointnet_version = self.cfg_train["policy"]["pointnet_version"]
        hand_pcl = self.cfg_train["policy"]["hand_pcl"]
        hand_model = None

        # PPO components
        self.stack_frame_numer = self.vec_env.stack_frame_numbers
        self.actor_critic = ActorCritic(self.observation_space.shape, self.state_space.shape, action_space_shape,
                                        self.init_noise_std, self.model_cfg, asymmetric=asymmetric,
                                        stack_frame_number=self.stack_frame_numer,
                                        sub_obs_type=self.vec_env.sub_obs_type,
                                        num_fingertip=self.vec_env.num_fingertips,
                                        pointnet_type=pointnet_version,
                                        envs=self.vec_env,
                                        hand_pcl=hand_pcl,
                                        hand_model=hand_model,
                                        args=args)

        # pointnet backbone
        self.pointnet_finetune = self.model_cfg['finetune_pointnet']
        self.finetune_pointnet_bz = 128
        if self.model_cfg['pretrain_pointnet']:
            if pointnet_version == 'pt2':
                pointnet_model_dict = torch.load(os.path.join(args.score_model_path,'pt2.pt'), map_location=self.device)
            elif pointnet_version == 'pt':
                pointnet_model_dict = torch.load(os.path.join(args.score_model_path,'pt.pt'), map_location=self.device)
            if self.model_cfg['shared_pointnet']:
                self.actor_critic.pointnet_enc.load_state_dict(pointnet_model_dict)
                if not self.model_cfg['finetune_pointnet']:
                    # freeze pointnet
                    for name,param in self.actor_critic.pointnet_enc.named_parameters():
                        param.requires_grad = False
            else:
                self.actor_critic.actor_pointnet_enc.load_state_dict(pointnet_model_dict)
                self.actor_critic.critic_pointnet_enc.load_state_dict(pointnet_model_dict)
                if not self.model_cfg['finetune_pointnet']:
                    # freeze pointnet
                    for name,param in self.actor_critic.actor_pointnet_enc.named_parameters():
                        param.requires_grad = False
                    for name,param in self.actor_critic.critic_pointnet_enc.named_parameters():
                        param.requires_grad = False

        self.actor_critic.to(self.device)
self.storage = RolloutStorage(self.vec_env.num_envs, self.num_transitions_per_env, self.observation_space.shape,
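The constructor in this record reads the usual PPO hyperparameters (`cliprange`, `ent_coef`, `value_loss_coef`, `gamma`, `lam`). For orientation, those knobs typically enter a clipped surrogate objective like the sketch below; this is a generic PPO loss on placeholder tensors, not necessarily this repository's exact update step:

# Generic PPO clipped-surrogate loss sketch (placeholder tensors; default coefficients
# chosen to mirror the config keys read above).
import torch

def ppo_loss(new_log_prob, old_log_prob, advantages, values, returns, entropy,
             clip_param=0.2, value_loss_coef=2.0, entropy_coef=0.0):
    ratio = torch.exp(new_log_prob - old_log_prob)                       # pi_new / pi_old
    surr1 = ratio * advantages
    surr2 = torch.clamp(ratio, 1.0 - clip_param, 1.0 + clip_param) * advantages
    surrogate_loss = -torch.min(surr1, surr2).mean()                     # clipped policy term
    value_loss = (returns - values).pow(2).mean()                        # value regression term
    return surrogate_loss + value_loss_coef * value_loss - entropy_coef * entropy.mean()

b = 32
loss = ppo_loss(torch.randn(b), torch.randn(b), torch.randn(b),
                torch.randn(b), torch.randn(b), torch.rand(b))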
0
2023-11-09 06:08:40+00:00
16k
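`loss_fn_cond` in this record's context trains the conditional score network with denoising score matching: the hand pose is perturbed according to the SDE's marginal distribution, the network output is regressed toward `-z / std`, and the squared error is weighted by `std ** 2`. A stripped-down, unconditional version of that loss is sketched below; the variance-exploding noise schedule and the tiny MLP are assumptions for illustration (the real model also conditions on the object point cloud):

# Denoising score-matching sketch (assumed VE schedule; unconditional toy model).
import torch
import torch.nn as nn

def ve_std(t, sigma_min=0.01, sigma_max=90.0):
    return sigma_min * (sigma_max / sigma_min) ** t        # assumed noise schedule

def dsm_loss(score_model, x, eps=1e-5):
    t = torch.rand(x.shape[0], 1, device=x.device) * (1.0 - eps) + eps
    std = ve_std(t)
    z = torch.randn_like(x)
    x_perturbed = x + z * std                              # mu = x for a VE SDE
    score = score_model(x_perturbed, t)
    # regress toward -z / std, weighted by std**2 as in loss_fn_cond
    return torch.mean(torch.sum((score + z / std) ** 2 * std ** 2, dim=-1))

net = nn.Sequential(nn.Linear(19, 128), nn.ReLU(), nn.Linear(128, 18))
x = torch.randn(8, 18)                                     # hypothetical hand-dof batch
loss = dsm_loss(lambda x_t, t: net(torch.cat([x_t, t], dim=-1)), x)
loss.backward()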
ml4bio/RhoFold
rhofold/model/structure_module.py
[ { "identifier": "Linear", "path": "rhofold/model/primitives.py", "snippet": "class Linear(nn.Linear):\n \"\"\"\n A Linear layer with built-in nonstandard initializations. Called just\n like torch.nn.Linear.\n\n Implements the initializers in 1.11.4, plus some additional ones found\n in the code.\n \"\"\"\n\n def __init__(\n self,\n in_dim: int,\n out_dim: int,\n bias: bool = True,\n ):\n \"\"\"\n Args:\n in_dim:\n The final dimension of inputs to the layer\n out_dim:\n The final dimension of layer outputs\n bias:\n Whether to learn an additive bias. True by default\n \"\"\"\n super(Linear, self).__init__(in_dim, out_dim, bias=bias)\n\n if bias:\n with torch.no_grad():\n self.bias.fill_(0)" }, { "identifier": "LayerNorm", "path": "rhofold/model/primitives.py", "snippet": "class LayerNorm(nn.Module):\n def __init__(self, c_in, eps=1e-5):\n super(LayerNorm, self).__init__()\n \n self.c_in = (c_in,)\n self.eps = eps\n\n self.weight = nn.Parameter(torch.ones(c_in))\n self.bias = nn.Parameter(torch.zeros(c_in))\n\n def forward(self, x): \n\n out = nn.functional.layer_norm(\n x,\n self.c_in,\n self.weight,\n self.bias,\n self.eps,\n )\n\n return out" }, { "identifier": "Rigid", "path": "rhofold/utils/rigid_utils.py", "snippet": "class Rigid:\n \"\"\"\n A class representing a rigid transformation. Little more than a wrapper\n around two objects: a Rotation object and a [*, 3] translation\n Designed to behave approximately like a single torch tensor with the \n shape of the shared batch dimensions of its component parts.\n \"\"\"\n def __init__(self, \n rots: Optional[Rotation],\n trans: Optional[torch.Tensor],\n ):\n \"\"\"\n Args:\n rots: A [*, 3, 3] rotation tensor\n trans: A corresponding [*, 3] translation tensor\n \"\"\"\n # (we need device, dtype, etc. from at least one input)\n\n batch_dims, dtype, device, requires_grad = None, None, None, None\n if(trans is not None):\n batch_dims = trans.shape[:-1]\n dtype = trans.dtype\n device = trans.device\n requires_grad = trans.requires_grad\n elif(rots is not None):\n batch_dims = rots.shape\n dtype = rots.dtype\n device = rots.device\n requires_grad = rots.requires_grad\n else:\n raise ValueError(\"At least one input argument must be specified\")\n\n if(rots is None):\n rots = Rotation.identity(\n batch_dims, dtype, device, requires_grad,\n )\n elif(trans is None):\n trans = identity_trans(\n batch_dims, dtype, device, requires_grad,\n )\n\n if((rots.shape != trans.shape[:-1]) or\n (rots.device != trans.device)):\n raise ValueError(\"Rots and trans incompatible\")\n\n # Force full precision. 
Happens to the rotations automatically.\n trans = trans.to(dtype=torch.float32)\n\n self._rots = rots\n self._trans = trans\n\n @staticmethod\n def identity(\n shape: Tuple[int], \n dtype: Optional[torch.dtype] = None,\n device: Optional[torch.device] = None, \n requires_grad: bool = True,\n fmt: str = \"quat\",\n ) -> Rigid:\n \"\"\"\n Constructs an identity transformation.\n\n Args:\n shape: \n The desired shape\n dtype: \n The dtype of both internal tensors\n device: \n The device of both internal tensors\n requires_grad: \n Whether grad should be enabled for the internal tensors\n Returns:\n The identity transformation\n \"\"\"\n return Rigid(\n Rotation.identity(shape, dtype, device, requires_grad, fmt=fmt),\n identity_trans(shape, dtype, device, requires_grad),\n )\n\n def __getitem__(self, \n index: Any,\n ) -> Rigid:\n \"\"\" \n Indexes the affine transformation with PyTorch-style indices.\n The index is applied to the shared dimensions of both the rotation\n and the translation.\n\n E.g.::\n\n r = Rotation(rot_mats=torch.rand(10, 10, 3, 3), quats=None)\n t = Rigid(r, torch.rand(10, 10, 3))\n indexed = t[3, 4:6]\n assert(indexed.shape == (2,))\n assert(indexed.get_rots().shape == (2,))\n assert(indexed.get_trans().shape == (2, 3))\n\n Args:\n index: A standard torch tensor index. E.g. 8, (10, None, 3),\n or (3, slice(0, 1, None))\n Returns:\n The indexed tensor \n \"\"\"\n if type(index) != tuple:\n index = (index,)\n \n return Rigid(\n self._rots[index],\n self._trans[index + (slice(None),)],\n )\n\n def __mul__(self,\n right: torch.Tensor,\n ) -> Rigid:\n \"\"\"\n Pointwise left multiplication of the transformation with a tensor.\n Can be used to e.g. mask the Rigid.\n\n Args:\n right:\n The tensor multiplicand\n Returns:\n The product\n \"\"\"\n if not(isinstance(right, torch.Tensor)):\n raise TypeError(\"The other multiplicand must be a Tensor\")\n\n new_rots = self._rots * right\n new_trans = self._trans * right[..., None]\n\n return Rigid(new_rots, new_trans)\n\n def __rmul__(self,\n left: torch.Tensor,\n ) -> Rigid:\n \"\"\"\n Reverse pointwise multiplication of the transformation with a \n tensor.\n\n Args:\n left:\n The left multiplicand\n Returns:\n The product\n \"\"\"\n return self.__mul__(left)\n\n @property\n def shape(self) -> torch.Size:\n \"\"\"\n Returns the shape of the shared dimensions of the rotation and\n the translation.\n \n Returns:\n The shape of the transformation\n \"\"\"\n s = self._trans.shape[:-1]\n return s\n\n @property\n def device(self) -> torch.device:\n \"\"\"\n Returns the device on which the Rigid's tensors are located.\n\n Returns:\n The device on which the Rigid's tensors are located\n \"\"\"\n return self._trans.device\n\n def get_rots(self) -> Rotation:\n \"\"\"\n Getter for the rotation.\n\n Returns:\n The rotation object\n \"\"\"\n return self._rots\n\n def get_trans(self) -> torch.Tensor:\n \"\"\"\n Getter for the translation.\n\n Returns:\n The stored translation\n \"\"\"\n return self._trans\n\n def compose_q_update_vec(self, \n q_update_vec: torch.Tensor,\n ) -> Rigid:\n \"\"\"\n Composes the transformation with a quaternion update vector of\n shape [*, 6], where the final 6 columns represent the x, y, and\n z values of a quaternion of form (1, x, y, z) followed by a 3D\n translation.\n\n Args:\n q_vec: The quaternion update vector.\n Returns:\n The composed transformation.\n \"\"\"\n q_vec, t_vec = q_update_vec[..., :3], q_update_vec[..., 3:]\n new_rots = self._rots.compose_q_update_vec(q_vec)\n\n trans_update = 
self._rots.apply(t_vec)\n new_translation = self._trans + trans_update\n\n return Rigid(new_rots, new_translation)\n\n def compose(self,\n r: Rigid,\n ) -> Rigid:\n \"\"\"\n Composes the current rigid object with another.\n\n Args:\n r:\n Another Rigid object\n Returns:\n The composition of the two transformations\n \"\"\"\n new_rot = self._rots.compose_r(r._rots)\n new_trans = self._rots.apply(r._trans) + self._trans\n return Rigid(new_rot, new_trans)\n\n def apply(self, \n pts: torch.Tensor,\n ) -> torch.Tensor:\n \"\"\"\n Applies the transformation to a coordinate tensor.\n\n Args:\n pts: A [*, 3] coordinate tensor.\n Returns:\n The transformed points.\n \"\"\"\n rotated = self._rots.apply(pts) \n return rotated + self._trans\n\n def invert_apply(self, \n pts: torch.Tensor\n ) -> torch.Tensor:\n \"\"\"\n Applies the inverse of the transformation to a coordinate tensor.\n\n Args:\n pts: A [*, 3] coordinate tensor\n Returns:\n The transformed points.\n \"\"\"\n pts = pts - self._trans\n return self._rots.invert_apply(pts) \n\n def invert(self) -> Rigid:\n \"\"\"\n Inverts the transformation.\n\n Returns:\n The inverse transformation.\n \"\"\"\n rot_inv = self._rots.invert() \n trn_inv = rot_inv.apply(self._trans)\n\n return Rigid(rot_inv, -1 * trn_inv)\n\n def map_tensor_fn(self, \n fn: Callable[torch.Tensor, torch.Tensor]\n ) -> Rigid:\n \"\"\"\n Apply a Tensor -> Tensor function to underlying translation and\n rotation tensors, mapping over the translation/rotation dimensions\n respectively.\n\n Args:\n fn:\n A Tensor -> Tensor function to be mapped over the Rigid\n Returns:\n The transformed Rigid object\n \"\"\" \n new_rots = self._rots.map_tensor_fn(fn) \n new_trans = torch.stack(\n list(map(fn, torch.unbind(self._trans, dim=-1))), \n dim=-1\n )\n\n return Rigid(new_rots, new_trans)\n\n def to_tensor_4x4(self) -> torch.Tensor:\n \"\"\"\n Converts a transformation to a homogenous transformation tensor.\n\n Returns:\n A [*, 4, 4] homogenous transformation tensor\n \"\"\"\n tensor = self._trans.new_zeros((*self.shape, 4, 4))\n tensor[..., :3, :3] = self._rots.get_rot_mats()\n tensor[..., :3, 3] = self._trans\n tensor[..., 3, 3] = 1\n return tensor\n\n @staticmethod\n def from_tensor_4x4(\n t: torch.Tensor\n ) -> Rigid:\n \"\"\"\n Constructs a transformation from a homogenous transformation\n tensor.\n\n Args:\n t: [*, 4, 4] homogenous transformation tensor\n Returns:\n T object with shape [*]\n \"\"\"\n if(t.shape[-2:] != (4, 4)):\n raise ValueError(\"Incorrectly shaped input tensor\")\n\n rots = Rotation(rot_mats=t[..., :3, :3], quats=None)\n trans = t[..., :3, 3]\n \n return Rigid(rots, trans)\n\n def to_tensor_7(self) -> torch.Tensor:\n \"\"\"\n Converts a transformation to a tensor with 7 final columns, four \n for the quaternion followed by three for the translation.\n\n Returns:\n A [*, 7] tensor representation of the transformation\n \"\"\"\n tensor = self._trans.new_zeros((*self.shape, 7))\n tensor[..., :4] = self._rots.get_quats()\n tensor[..., 4:] = self._trans\n\n return tensor\n\n @staticmethod\n def from_tensor_7(\n t: torch.Tensor,\n normalize_quats: bool = False,\n ) -> Rigid:\n if(t.shape[-1] != 7):\n raise ValueError(\"Incorrectly shaped input tensor\")\n\n quats, trans = t[..., :4], t[..., 4:]\n\n rots = Rotation(\n rot_mats=None, \n quats=quats, \n normalize_quats=normalize_quats\n )\n\n return Rigid(rots, trans)\n\n @staticmethod\n def from_3_points(\n p_neg_x_axis: torch.Tensor, \n origin: torch.Tensor, \n p_xy_plane: torch.Tensor, \n eps: float = 1e-8\n ) -> 
Rigid:\n \"\"\"\n Implements algorithm 21. Constructs transformations from sets of 3 \n points using the Gram-Schmidt algorithm.\n\n Args:\n p_neg_x_axis: [*, 3] coordinates\n origin: [*, 3] coordinates used as frame origins\n p_xy_plane: [*, 3] coordinates\n eps: Small epsilon value\n Returns:\n A transformation object of shape [*]\n \"\"\"\n p_neg_x_axis = torch.unbind(p_neg_x_axis, dim=-1)\n origin = torch.unbind(origin, dim=-1)\n p_xy_plane = torch.unbind(p_xy_plane, dim=-1)\n\n e0 = [c1 - c2 for c1, c2 in zip(origin, p_neg_x_axis)]\n e1 = [c1 - c2 for c1, c2 in zip(p_xy_plane, origin)]\n\n denom = torch.sqrt(sum((c * c for c in e0)) + eps)\n e0 = [c / denom for c in e0]\n dot = sum((c1 * c2 for c1, c2 in zip(e0, e1)))\n e1 = [c2 - c1 * dot for c1, c2 in zip(e0, e1)]\n denom = torch.sqrt(sum((c * c for c in e1)) + eps)\n e1 = [c / denom for c in e1]\n e2 = [\n e0[1] * e1[2] - e0[2] * e1[1],\n e0[2] * e1[0] - e0[0] * e1[2],\n e0[0] * e1[1] - e0[1] * e1[0],\n ]\n\n rots = torch.stack([c for tup in zip(e0, e1, e2) for c in tup], dim=-1)\n rots = rots.reshape(rots.shape[:-1] + (3, 3))\n\n rot_obj = Rotation(rot_mats=rots, quats=None)\n\n return Rigid(rot_obj, torch.stack(origin, dim=-1))\n\n def unsqueeze(self, \n dim: int,\n ) -> Rigid:\n \"\"\"\n Analogous to torch.unsqueeze. The dimension is relative to the\n shared dimensions of the rotation/translation.\n \n Args:\n dim: A positive or negative dimension index.\n Returns:\n The unsqueezed transformation.\n \"\"\"\n if dim >= len(self.shape):\n raise ValueError(\"Invalid dimension\")\n rots = self._rots.unsqueeze(dim)\n trans = self._trans.unsqueeze(dim if dim >= 0 else dim - 1)\n\n return Rigid(rots, trans)\n\n @staticmethod\n def cat(\n ts: Sequence[Rigid], \n dim: int,\n ) -> Rigid:\n \"\"\"\n Concatenates transformations along a new dimension.\n\n Args:\n ts: \n A list of T objects\n dim: \n The dimension along which the transformations should be \n concatenated\n Returns:\n A concatenated transformation object\n \"\"\"\n rots = Rotation.cat([t._rots for t in ts], dim) \n trans = torch.cat(\n [t._trans for t in ts], dim=dim if dim >= 0 else dim - 1\n )\n\n return Rigid(rots, trans)\n\n def apply_rot_fn(self, fn: Callable[Rotation, Rotation]) -> Rigid:\n \"\"\"\n Applies a Rotation -> Rotation function to the stored rotation\n object.\n\n Args:\n fn: A function of type Rotation -> Rotation\n Returns:\n A transformation object with a transformed rotation.\n \"\"\"\n return Rigid(fn(self._rots), self._trans)\n\n def apply_trans_fn(self, fn: Callable[torch.Tensor, torch.Tensor]) -> Rigid:\n \"\"\"\n Applies a Tensor -> Tensor function to the stored translation.\n\n Args:\n fn: \n A function of type Tensor -> Tensor to be applied to the\n translation\n Returns:\n A transformation object with a transformed translation.\n \"\"\"\n return Rigid(self._rots, fn(self._trans))\n\n def scale_translation(self, trans_scale_factor: float) -> Rigid:\n \"\"\"\n Scales the translation by a constant factor.\n\n Args:\n trans_scale_factor:\n The constant factor\n Returns:\n A transformation object with a scaled translation.\n \"\"\"\n fn = lambda t: t * trans_scale_factor\n return self.apply_trans_fn(fn)\n\n def stop_rot_gradient(self) -> Rigid:\n \"\"\"\n Detaches the underlying rotation object\n\n Returns:\n A transformation object with detached rotations\n \"\"\"\n fn = lambda r: r.detach()\n return self.apply_rot_fn(fn)\n\n @staticmethod\n def make_transform_from_reference(n_xyz, ca_xyz, c_xyz, eps=1e-20):\n \"\"\"\n Returns a transformation object 
from reference coordinates.\n \n Note that this method does not take care of symmetries. If you \n provide the atom positions in the non-standard way, the N atom will \n end up not at [-0.527250, 1.359329, 0.0] but instead at \n [-0.527250, -1.359329, 0.0]. You need to take care of such cases in \n your code.\n \n Args:\n n_xyz: A [*, 3] tensor of nitrogen xyz coordinates.\n ca_xyz: A [*, 3] tensor of carbon alpha xyz coordinates.\n c_xyz: A [*, 3] tensor of carbon xyz coordinates.\n Returns:\n A transformation object. After applying the translation and \n rotation to the reference backbone, the coordinates will \n approximately equal to the input coordinates.\n \"\"\" \n translation = -1 * ca_xyz\n n_xyz = n_xyz + translation\n c_xyz = c_xyz + translation\n\n c_x, c_y, c_z = [c_xyz[..., i] for i in range(3)]\n norm = torch.sqrt(eps + c_x ** 2 + c_y ** 2)\n sin_c1 = -c_y / norm\n cos_c1 = c_x / norm\n zeros = sin_c1.new_zeros(sin_c1.shape)\n ones = sin_c1.new_ones(sin_c1.shape)\n\n c1_rots = sin_c1.new_zeros((*sin_c1.shape, 3, 3))\n c1_rots[..., 0, 0] = cos_c1\n c1_rots[..., 0, 1] = -1 * sin_c1\n c1_rots[..., 1, 0] = sin_c1\n c1_rots[..., 1, 1] = cos_c1\n c1_rots[..., 2, 2] = 1\n\n norm = torch.sqrt(eps + c_x ** 2 + c_y ** 2 + c_z ** 2)\n sin_c2 = c_z / norm\n cos_c2 = torch.sqrt(c_x ** 2 + c_y ** 2) / norm\n\n c2_rots = sin_c2.new_zeros((*sin_c2.shape, 3, 3))\n c2_rots[..., 0, 0] = cos_c2\n c2_rots[..., 0, 2] = sin_c2\n c2_rots[..., 1, 1] = 1\n c2_rots[..., 2, 0] = -1 * sin_c2\n c2_rots[..., 2, 2] = cos_c2\n\n c_rots = rot_matmul(c2_rots, c1_rots)\n n_xyz = rot_vec_mul(c_rots, n_xyz)\n\n _, n_y, n_z = [n_xyz[..., i] for i in range(3)]\n norm = torch.sqrt(eps + n_y ** 2 + n_z ** 2)\n sin_n = -n_z / norm\n cos_n = n_y / norm\n\n n_rots = sin_c2.new_zeros((*sin_c2.shape, 3, 3))\n n_rots[..., 0, 0] = 1\n n_rots[..., 1, 1] = cos_n\n n_rots[..., 1, 2] = -1 * sin_n\n n_rots[..., 2, 1] = sin_n\n n_rots[..., 2, 2] = cos_n\n\n rots = rot_matmul(n_rots, c_rots)\n\n rots = rots.transpose(-1, -2)\n translation = -1 * translation\n\n rot_obj = Rotation(rot_mats=rots, quats=None)\n\n return Rigid(rot_obj, translation)\n\n def cuda(self) -> Rigid:\n \"\"\"\n Moves the transformation object to GPU memory\n \n Returns:\n A version of the transformation on GPU\n \"\"\"\n return Rigid(self._rots.cuda(), self._trans.cuda())" }, { "identifier": "dict_multimap", "path": "rhofold/utils/tensor_utils.py", "snippet": "def dict_multimap(fn, dicts):\n first = dicts[0]\n new_dict = {}\n for k, v in first.items():\n all_v = [d[k] for d in dicts]\n if type(v) is dict:\n new_dict[k] = dict_multimap(fn, all_v)\n else:\n new_dict[k] = fn(all_v)\n\n return new_dict" }, { "identifier": "permute_final_dims", "path": "rhofold/utils/tensor_utils.py", "snippet": "def permute_final_dims(tensor: torch.Tensor, inds: List[int]):\n zero_index = -1 * len(inds)\n first_inds = list(range(len(tensor.shape[:zero_index])))\n return tensor.permute(first_inds + [zero_index + i for i in inds])" }, { "identifier": "flatten_final_dims", "path": "rhofold/utils/tensor_utils.py", "snippet": "def flatten_final_dims(t: torch.Tensor, no_dims: int):\n return t.reshape(t.shape[:-no_dims] + (-1,))" }, { "identifier": "RNAAlphabet", "path": "rhofold/utils/alphabet.py", "snippet": "class RNAAlphabet(Alphabet):\n\n def get_batch_converter(self):\n if self.use_msa:\n return RNAMSABatchConverter(self)\n else:\n return BatchConverter(self)\n\n @classmethod\n def from_architecture(cls, name: str, ) -> \"RNAAlphabet\":\n if name in (\"RNA MSA Transformer\", 
\"rna_msa_transformer\", \"RNA\"):\n standard_toks = rna_msaseq_toks[\"toks\"]\n prepend_toks = (\"<cls>\", \"<pad>\", \"<eos>\", \"<unk>\")\n append_toks = (\"<mask>\",)\n prepend_bos = True\n append_eos = False\n use_msa = True\n else:\n raise ValueError(\"Unknown architecture selected\")\n return cls(\n standard_toks, prepend_toks, append_toks, prepend_bos, append_eos, use_msa\n )" }, { "identifier": "RNAConverter", "path": "rhofold/utils/converter.py", "snippet": "class RNAConverter():\n \"\"\"RNA Structure Converter.\"\"\"\n\n def __init__(self):\n \"\"\"\"\"\"\n\n self.eps = 1e-4\n self.__init()\n\n def __init(self):\n \"\"\"\"\"\"\n\n self.cord_dict = defaultdict(dict)\n for resd_name in RNA_CONSTANTS.RESD_NAMES:\n for atom_name, _, cord_vals in RNA_CONSTANTS.ATOM_INFOS_PER_RESD[resd_name]:\n self.cord_dict[resd_name][atom_name] = torch.tensor(cord_vals, dtype=torch.float32)\n\n trans_dict_all = {}\n for resd_name in RNA_CONSTANTS.RESD_NAMES:\n trans_dict = {}\n cord_dict = {}\n\n atom_infos = RNA_CONSTANTS.ATOM_INFOS_PER_RESD[resd_name]\n angl_infos = RNA_CONSTANTS.ANGL_INFOS_PER_RESD[resd_name]\n n_angls = len(angl_infos)\n \n for atom_name, idx_rgrp, _ in atom_infos:\n if idx_rgrp == 0:\n cord_dict[atom_name] = self.cord_dict[resd_name][atom_name]\n\n trans_dict['omega-main'] = (torch.eye(3, dtype=torch.float32), torch.zeros((3), dtype=torch.float32))\n trans_dict['phi-main'] = (torch.eye(3, dtype=torch.float32), torch.zeros((3), dtype=torch.float32))\n\n for idx_angl, (angl_name, _, atom_names_sel) in enumerate(angl_infos):\n x1 = cord_dict[atom_names_sel[0]]\n x2 = cord_dict[atom_names_sel[1]]\n x3 = cord_dict[atom_names_sel[2]]\n rot, tsl_vec = calc_rot_tsl(x1, x3, x3 + (x3 - x2))\n trans_dict['%s-main' % angl_name] = (rot, tsl_vec)\n\n for atom_name, idx_rgrp, _ in atom_infos:\n if idx_rgrp == idx_angl + 3:\n cord_dict[atom_name] = tsl_vec + torch.sum(\n rot * self.cord_dict[resd_name][atom_name].view(1, 3), dim=1)\n\n for idx_angl_src in range(1, n_angls - 1):\n idx_angl_dst = idx_angl_src + 1\n angl_name_src = angl_infos[idx_angl_src][0]\n angl_name_dst = angl_infos[idx_angl_dst][0]\n rot_src, tsl_vec_src = trans_dict['%s-main' % angl_name_src]\n rot_dst, tsl_vec_dst = trans_dict['%s-main' % angl_name_dst]\n rot = torch.matmul(rot_src.transpose(1, 0), rot_dst)\n tsl_vec = torch.matmul(rot_src.transpose(1, 0), tsl_vec_dst - tsl_vec_src)\n trans_dict['%s-%s' % (angl_name_dst, angl_name_src)] = (rot, tsl_vec)\n\n trans_dict_all[resd_name] = trans_dict\n\n self.trans_dict_init = trans_dict_all\n\n def build_cords(self, seq, fram, angl, rtn_cmsk=False):\n\n # initialization\n n_resds = len(seq)\n device = angl.device\n\n angl = angl.squeeze(dim=0) / (torch.norm(angl.squeeze(dim=0), dim=2, keepdim=True) + self.eps)\n rigid = Rigid.from_tensor_7(fram, normalize_quats=True)\n fram = rigid.to_tensor_4x4()\n rot = fram[:,:,:3,:3]\n tsl = fram[:,:,:3,3:].permute(0,1,3,2)\n\n fram = torch.cat([rot, tsl], dim=2)[:,:,:4,:3].permute(1,0,2,3)\n fmsk = torch.ones((n_resds, 1), dtype=torch.int8, device=device)\n amsk = torch.ones((n_resds, RNA_CONSTANTS.N_ANGLS_PER_RESD_MAX), dtype=torch.int8, device=device)\n cord = torch.zeros((n_resds, RNA_CONSTANTS.ATOM_NUM_MAX, 3), dtype=torch.float32, device=device)\n cmsk = torch.zeros((n_resds, RNA_CONSTANTS.ATOM_NUM_MAX), dtype=torch.int8, device=device)\n\n for resd_name in RNA_CONSTANTS.RESD_NAMES:\n idxs = [x for x in range(n_resds) if seq[x] == resd_name]\n if len(idxs) == 0:\n continue\n cord[idxs], cmsk[idxs] =\\\n self.__build_cord(resd_name, 
fram[idxs], fmsk[idxs], angl[idxs], amsk[idxs])\n\n return (cord, cmsk) if rtn_cmsk else (cord)\n\n def __build_cord(self, resd_name, fram, fmsk, angl, amsk):\n \"\"\"\"\"\"\n\n # initialization\n device = fram.device\n n_resds = fram.shape[0]\n atom_names_all = RNA_CONSTANTS.ATOM_NAMES_PER_RESD[resd_name]\n atom_names_pad = atom_names_all + ['X'] * (RNA_CONSTANTS.ATOM_NUM_MAX - len(atom_names_all))\n atom_infos_all = RNA_CONSTANTS.ATOM_INFOS_PER_RESD[resd_name]\n\n cord_dict = defaultdict(\n lambda: torch.zeros((n_resds, 3), dtype=torch.float32, device=device))\n cmsk_vec_dict = defaultdict(lambda: torch.zeros((n_resds), dtype=torch.int8, device=device))\n\n fram_null = torch.tensor(\n [[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 0]], dtype=torch.float32, device=device)\n fram_dict = defaultdict(lambda: fram_null.unsqueeze(dim=0).repeat(n_resds, 1, 1))\n fmsk_vec_dict = defaultdict(lambda: torch.zeros((n_resds), dtype=torch.int8, device=device))\n\n trans_dict = {'main': (fram[:, 0, :3], fram[:, 0, 3])}\n\n rot_curr, tsl_curr = trans_dict['main']\n atom_names_sel = [x[0] for x in atom_infos_all if x[1] == 0]\n for atom_name_sel in atom_names_sel:\n cord_vec = self.cord_dict[resd_name][atom_name_sel].to(device)\n cord_dict[atom_name_sel] = \\\n tsl_curr + torch.sum(rot_curr * cord_vec.view(1, 1, 3), dim=2)\n cmsk_vec_dict[atom_name_sel] = fmsk[:, 0]\n\n # determine 3D coordinates of atoms belonging to side-chain rigid-groups\n angl_infos_all = RNA_CONSTANTS.ANGL_INFOS_PER_RESD[resd_name]\n rgrp_names_all = ['omega', 'phi'] + [x[0] for x in angl_infos_all]\n\n for idx_rgrp, rgrp_name_curr in enumerate(rgrp_names_all):\n if rgrp_name_curr in ['omega', 'phi', 'angl_0', 'angl_1']:\n rgrp_name_prev = 'main'\n else:\n rgrp_name_prev = 'angl_%d' % (int(rgrp_name_curr[-1]) - 1)\n\n rot_prev, tsl_prev = trans_dict[rgrp_name_prev]\n rot_base, tsl_vec_base = \\\n self.trans_dict_init[resd_name]['%s-%s' % (rgrp_name_curr, rgrp_name_prev)]\n rot_base = rot_base.unsqueeze(dim=0).to(device)\n tsl_base = tsl_vec_base.unsqueeze(dim=0).to(device)\n \n rot_addi, tsl_addi = calc_angl_rot_tsl(angl[:, idx_rgrp])\n rot_curr, tsl_curr = merge_rot_tsl(\n rot_prev, tsl_prev, rot_base, tsl_base, rot_addi, tsl_addi)\n trans_dict[rgrp_name_curr] = (rot_curr, tsl_curr)\n\n fram_dict[rgrp_name_curr] = \\\n torch.cat([rot_curr, tsl_curr.unsqueeze(dim=1)], dim=1)\n fmsk_vec_dict[rgrp_name_curr] = fmsk[:, 0] * amsk[:, idx_rgrp]\n\n atom_names_sel = [x[0] for x in atom_infos_all if x[1] == idx_rgrp + 1]\n for atom_name_sel in atom_names_sel:\n cord_vec = self.cord_dict[resd_name][atom_name_sel].to(device)\n\n cord_dict[atom_name_sel] = \\\n tsl_curr + torch.sum(rot_curr * cord_vec.view(1, 1, 3), dim=2)\n cmsk_vec_dict[atom_name_sel] = fmsk_vec_dict[rgrp_name_curr]\n\n cmsk = torch.stack([cmsk_vec_dict[x] for x in atom_names_pad][:RNA_CONSTANTS.ATOM_NUM_MAX], dim=1)\n cord = torch.stack([cord_dict[x] for x in atom_names_pad][:RNA_CONSTANTS.ATOM_NUM_MAX], dim=1)\n\n return cord, cmsk\n\n def export_pdb_file(self, seq, atom_cords, path, atom_masks=None, confidence=None, chain_id=None, logger = None):\n \"\"\"Export a PDB file.\"\"\"\n\n # configurations\n i_code = ' '\n chain_id = '0' if chain_id is None else chain_id\n occupancy = 1.0\n cord_min = -999.0\n cord_max = 999.0\n seq_len = len(seq)\n\n n_key_atoms = RNA_CONSTANTS.ATOM_NUM_MAX\n\n # take all the atom coordinates as valid, if not specified\n if atom_masks is None:\n atom_masks = np.ones(atom_cords.shape[:-1], dtype=np.int8)\n\n # determine the set of atom names (per 
residue)\n if atom_cords.ndim == 2:\n if atom_cords.shape[0] == seq_len * n_key_atoms:\n atom_cords = np.reshape(atom_cords, [seq_len, n_key_atoms, 3])\n atom_masks = np.reshape(atom_masks, [seq_len, n_key_atoms])\n else:\n raise ValueError('atom coordinates\\' shape does not match the sequence length')\n\n elif atom_cords.ndim == 3:\n assert atom_cords.shape[0] == seq_len\n atom_cords = atom_cords\n atom_masks = atom_masks\n\n else:\n raise ValueError('atom coordinates must be a 2D or 3D np.ndarray')\n\n # reset invalid values in atom coordinates\n atom_cords = np.clip(atom_cords, cord_min, cord_max)\n atom_cords[np.isnan(atom_cords)] = 0.0\n atom_cords[np.isinf(atom_cords)] = 0.0\n\n # export the 3D structure to a PDB file\n os.makedirs(os.path.dirname(os.path.realpath(path)), exist_ok=True)\n with open(path, 'w') as o_file:\n n_atoms = 0\n for idx_resd, resd_name in enumerate(seq):\n for idx_atom, atom_name in enumerate(RNA_CONSTANTS.ATOM_NAMES_PER_RESD[resd_name]):\n\n temp_factor = 0.0 if confidence is None else \\\n float(100 * confidence.reshape([seq_len])[idx_resd - 1])\n\n if atom_masks[idx_resd, idx_atom] == 0:\n continue\n n_atoms += 1\n charge = atom_name[0]\n line_str = ''.join([\n 'ATOM ',\n '%5d' % n_atoms,\n ' ' + atom_name + ' ' * (3 - len(atom_name)),\n ' %s' % resd_name,\n ' %s' % chain_id,\n ' ' * (4 - len(str(idx_resd + 1))),\n '%s' % str(idx_resd + 1),\n '%s ' % i_code,\n '%8.3f' % atom_cords[idx_resd, idx_atom, 0],\n '%8.3f' % atom_cords[idx_resd, idx_atom, 1],\n '%8.3f' % atom_cords[idx_resd, idx_atom, 2],\n '%6.2f' % occupancy,\n '%6.2f' % temp_factor,\n ' ' * 10,\n '%2s' % charge,\n '%2s' % ' ',\n ])\n assert len(line_str) == 80, 'line length must be exactly 80 characters: ' + line_str\n o_file.write(line_str + '\\n')\n\n if logger is not None:\n logger.info(f' Export PDB file to {path}')" } ]
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Optional, Tuple, Sequence
from rhofold.model.primitives import Linear, LayerNorm
from rhofold.utils.rigid_utils import Rigid
from rhofold.utils.tensor_utils import (
    dict_multimap,
    permute_final_dims,
    flatten_final_dims,
)
from einops import rearrange
from rhofold.utils.alphabet import RNAAlphabet
from rhofold.utils.converter import RNAConverter
11003
no_heads: Number of attention heads no_qk_points: Number of query/key points to generate no_v_points: Number of value points to generate """ super(InvariantPointAttention, self).__init__() self.c_s = c_s self.c_z = c_z self.c_hidden = c_hidden self.no_heads = no_heads self.no_qk_points = no_qk_points self.no_v_points = no_v_points self.inf = inf self.eps = eps hc = self.c_hidden * self.no_heads self.linear_q = Linear(self.c_s, hc) self.linear_kv = Linear(self.c_s, 2 * hc) hpq = self.no_heads * self.no_qk_points * 3 self.linear_q_points = Linear(self.c_s, hpq) hpkv = self.no_heads * (self.no_qk_points + self.no_v_points) * 3 self.linear_kv_points = Linear(self.c_s, hpkv) # hpv = self.no_heads * self.no_v_points * 3 self.linear_b = Linear(self.c_z, self.no_heads) self.head_weights = nn.Parameter(torch.zeros((no_heads))) # ipa_point_weights_init_(self.head_weights) concat_out_dim = self.no_heads * ( self.c_z + self.c_hidden + self.no_v_points * 4 ) self.linear_out = Linear(concat_out_dim, self.c_s) self.softmax = nn.Softmax(dim=-1) self.softplus = nn.Softplus() def forward( self, s: torch.Tensor, z: Optional[torch.Tensor], r: Rigid, mask: torch.Tensor, _offload_inference: bool = False, _z_reference_list: Optional[Sequence[torch.Tensor]] = None, ) -> torch.Tensor: """ Args: s: [*, N_res, C_s] single representation z: [*, N_res, N_res, C_z] pair representation r: [*, N_res] transformation object mask: [*, N_res] mask Returns: [*, N_res, C_s] single representation update """ z = [z] ####################################### # Generate scalar and point activations ####################################### # [*, N_res, H * C_hidden] q = self.linear_q(s) kv = self.linear_kv(s) # [*, N_res, H, C_hidden] q = q.view(q.shape[:-1] + (self.no_heads, -1)) # [*, N_res, H, 2 * C_hidden] kv = kv.view(kv.shape[:-1] + (self.no_heads, -1)) # [*, N_res, H, C_hidden] k, v = torch.split(kv, self.c_hidden, dim=-1) # [*, N_res, H * P_q * 3] q_pts = self.linear_q_points(s) # [*, N_res, H * P_q, 3] q_pts = torch.split(q_pts, q_pts.shape[-1] // 3, dim=-1) q_pts = torch.stack(q_pts, dim=-1) q_pts = r[..., None].apply(q_pts) # [*, N_res, H, P_q, 3] q_pts = q_pts.view( q_pts.shape[:-2] + (self.no_heads, self.no_qk_points, 3) ) # [*, N_res, H * (P_q + P_v) * 3] kv_pts = self.linear_kv_points(s) # [*, N_res, H * (P_q + P_v), 3] kv_pts = torch.split(kv_pts, kv_pts.shape[-1] // 3, dim=-1) kv_pts = torch.stack(kv_pts, dim=-1) kv_pts = r[..., None].apply(kv_pts) # [*, N_res, H, (P_q + P_v), 3] kv_pts = kv_pts.view(kv_pts.shape[:-2] + (self.no_heads, -1, 3)) # [*, N_res, H, P_q/P_v, 3] k_pts, v_pts = torch.split( kv_pts, [self.no_qk_points, self.no_v_points], dim=-2 ) # [*, N_res, N_res, H] b = self.linear_b(z[0]) if(_offload_inference): z[0] = z[0].cpu() # [*, H, N_res, N_res] a = torch.matmul(
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class RefineNet(nn.Module): """""" def __init__(self, dim = 64, is_pos_emb = True, n_layer = 4, enable = True, **kwargs): """Constructor function.""" super().__init__() self.is_pos_emb = is_pos_emb self.alphabet = RNAAlphabet.from_architecture('RNA') self.embed_tokens = nn.Embedding(len(self.alphabet), dim) self.enable = enable if self.is_pos_emb: self.embed_positions = PosEmbedding(4096, dim, self.alphabet.padding_idx) self.refine_layer0 = ResEGNN(corrections=n_layer, dims_in=dim) self.refine_layer1 = ResEGNN(corrections=n_layer, dims_in=dim) self.refine_layer2 = ResEGNN(corrections=n_layer, dims_in=dim) self.refine_layer3 = ResEGNN(corrections=n_layer, dims_in=dim) def forward(self, tokens, cords): """Perform the forward pass. Args: Returns: """ if not self.enable: return cords tokens = tokens[:, 0, :] tokens = tokens.unsqueeze(-1).repeat(1, 1, 23) b, l, n = tokens.shape cords = cords.reshape([b, l, n, 3]) fea = self.embed_tokens(tokens) b, l, n, _ = fea.shape if self.is_pos_emb: fea += self.embed_positions(tokens.reshape(b * l, n)).view(fea.size()) out = self.refine_layer0(fea.reshape([ b * l, n, -1]), cords.reshape([ b * l, n, -1]), is_fea = True) fea, cords = out[-1] fea = fea.reshape([b, l, n, -1]).transpose(1,2) cords = cords.reshape([b, l, n, -1]).transpose(1,2) out = self.refine_layer1(fea.reshape([ b * n, l, -1]), cords.reshape([ b * n, l, -1]), is_fea = True) fea, cords = out[-1] fea = fea.reshape([b, n, l, -1]).transpose(1,2) cords = cords.reshape([b, n, l, -1]).transpose(1,2) out = self.refine_layer2(fea.reshape([ b * l, n, -1]), cords.reshape([ b * l, n, -1]), is_fea = True) fea, cords = out[-1] fea = fea.reshape([b, l, n, -1]).transpose(1,2) cords = cords.reshape([b, l, n, -1]).transpose(1,2) out = self.refine_layer3(fea.reshape([ b * n, l, -1]), cords.reshape([ b * n, l, -1]), is_fea = True) fea, cords = out[-1] cords = cords.reshape([b, n, l, -1]).transpose(1,2) cords = cords.reshape([b, l * n, 3]) return cords class Swish_(torch.nn.Module): def forward(self, x): return x * x.sigmoid() SiLU = torch.nn.SiLU if hasattr(torch.nn, 'SiLU') else Swish_ class CoorsNorm(torch.nn.Module): def __init__(self, eps=1e-8): super().__init__() self.eps = eps self.fn = torch.nn.LayerNorm(1) def forward(self, coors): norm = coors.norm(dim=-1, keepdim=True) normed_coors = coors / norm.clamp(min=self.eps) phase = self.fn(norm) return phase * normed_coors # classes class EGNN(torch.nn.Module): def __init__( self, dim, m_dim=32, ): super().__init__() ''' # Most of the code in this file is based on egnn-pytorch by lucidrains. 
''' edge_input_dim = (dim * 2) + 1 self.edge_mlp = torch.nn.Sequential( torch.nn.Linear(edge_input_dim, edge_input_dim * 2), SiLU(), torch.nn.Linear(edge_input_dim * 2, m_dim), SiLU() ) self.coors_norm = CoorsNorm() self.node_mlp = torch.nn.Sequential( torch.nn.Linear(dim + m_dim, dim * 2), SiLU(), torch.nn.Linear(dim * 2, dim), ) self.coors_mlp = torch.nn.Sequential( torch.nn.Linear(m_dim, m_dim * 4), SiLU(), torch.nn.Linear(m_dim * 4, 1) ) def forward(self, feats, coors): rel_coors = rearrange(coors, 'b i d -> b i () d') - rearrange(coors, 'b j d -> b () j d') rel_dist = (rel_coors ** 2).sum(dim=-1, keepdim=True) feats_j = rearrange(feats, 'b j d -> b () j d') feats_i = rearrange(feats, 'b i d -> b i () d') feats_i, feats_j = torch.broadcast_tensors(feats_i, feats_j) edge_input = torch.cat((feats_i, feats_j, rel_dist), dim=-1) m_ij = self.edge_mlp(edge_input) coor_weights = self.coors_mlp(m_ij) coor_weights = rearrange(coor_weights, 'b i j () -> b i j') rel_coors = self.coors_norm(rel_coors) scale_factor = 1 / 50.0 coors_out = torch.einsum('b i j, b i j c -> b i c', coor_weights * scale_factor, rel_coors) + coors m_i = m_ij.sum(dim=-2) node_mlp_input = torch.cat((feats, m_i), dim=-1) node_out = self.node_mlp(node_mlp_input) + feats return node_out, coors_out class ResEGNN(torch.nn.Module): def __init__(self, corrections=4, dims_in=41, **kwargs): super().__init__() self.layers = torch.nn.ModuleList([EGNN(dim=dims_in, **kwargs) for _ in range(corrections)]) def forward(self, amino, geom, is_fea = False, keep_last_cords = None): output = [] for layer in self.layers: geom_init = geom amino, geom = layer(amino, geom) if keep_last_cords is not None: geom[:, -keep_last_cords:] = geom_init[:, -keep_last_cords:] output.append([amino, geom]) return output if is_fea else geom class PosEmbedding(nn.Embedding): """ """ def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int): if padding_idx is not None: num_embeddings_ = num_embeddings + padding_idx + 1 else: num_embeddings_ = num_embeddings super().__init__(num_embeddings_, embedding_dim, padding_idx) self.max_positions = num_embeddings def forward(self, input: torch.Tensor): """Input is expected to be of size [bsz x seqlen].""" mask = input.ne(self.padding_idx).int() positions = (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + self.padding_idx return F.embedding( positions, self.weight, self.padding_idx, self.max_norm, self.norm_type, self.scale_grad_by_freq, self.sparse, ) class AngleResnetBlock(nn.Module): def __init__(self, c_hidden): """ Args: c_hidden: Hidden channel dimension """ super(AngleResnetBlock, self).__init__() self.c_hidden = c_hidden self.linear_1 = Linear(self.c_hidden, self.c_hidden) self.linear_2 = Linear(self.c_hidden, self.c_hidden) self.relu = nn.ReLU() def forward(self, a: torch.Tensor) -> torch.Tensor: s_initial = a a = self.relu(a) a = self.linear_1(a) a = self.relu(a) a = self.linear_2(a) return a + s_initial class AngleResnet(nn.Module): """ """ def __init__(self, c_in, c_hidden, no_blocks, no_angles, epsilon): """ Args: c_in: Input channel dimension c_hidden: Hidden channel dimension no_blocks: Number of resnet blocks no_angles: Number of torsion angles to generate epsilon: Small constant for normalization """ super(AngleResnet, self).__init__() self.c_in = c_in self.c_hidden = c_hidden self.no_blocks = no_blocks self.no_angles = no_angles self.eps = epsilon self.linear_in = Linear(self.c_in, self.c_hidden) self.linear_initial = Linear(self.c_in, self.c_hidden) self.layers = nn.ModuleList() 
for _ in range(self.no_blocks): layer = AngleResnetBlock(c_hidden=self.c_hidden) self.layers.append(layer) self.linear_out = Linear(self.c_hidden, self.no_angles * 2) self.relu = nn.ReLU() def forward( self, s: torch.Tensor, s_initial: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor]: """ Args: s: [*, C_hidden] single embedding s_initial: [*, C_hidden] single embedding as of the start of the StructureModule Returns: [*, no_angles, 2] predicted angles """ # [*, C_hidden] s_initial = self.relu(s_initial) s_initial = self.linear_initial(s_initial) s = self.relu(s) s = self.linear_in(s) s = s + s_initial for l in self.layers: s = l(s) s = self.relu(s) # [*, no_angles * 2] s = self.linear_out(s) # [*, no_angles, 2] s = s.view(s.shape[:-1] + (-1, 2)) unnormalized_s = s norm_denom = torch.sqrt( torch.clamp( torch.sum(s ** 2, dim=-1, keepdim=True), min=self.eps, ) ) s = s / norm_denom return unnormalized_s, s class InvariantPointAttention(nn.Module): """ Implements Algorithm 22. """ def __init__( self, c_s: int, c_z: int, c_hidden: int, no_heads: int, no_qk_points: int, no_v_points: int, inf: float = 1e5, eps: float = 1e-8, ): """ Args: c_s: Single representation channel dimension c_z: Pair representation channel dimension c_hidden: Hidden channel dimension no_heads: Number of attention heads no_qk_points: Number of query/key points to generate no_v_points: Number of value points to generate """ super(InvariantPointAttention, self).__init__() self.c_s = c_s self.c_z = c_z self.c_hidden = c_hidden self.no_heads = no_heads self.no_qk_points = no_qk_points self.no_v_points = no_v_points self.inf = inf self.eps = eps hc = self.c_hidden * self.no_heads self.linear_q = Linear(self.c_s, hc) self.linear_kv = Linear(self.c_s, 2 * hc) hpq = self.no_heads * self.no_qk_points * 3 self.linear_q_points = Linear(self.c_s, hpq) hpkv = self.no_heads * (self.no_qk_points + self.no_v_points) * 3 self.linear_kv_points = Linear(self.c_s, hpkv) # hpv = self.no_heads * self.no_v_points * 3 self.linear_b = Linear(self.c_z, self.no_heads) self.head_weights = nn.Parameter(torch.zeros((no_heads))) # ipa_point_weights_init_(self.head_weights) concat_out_dim = self.no_heads * ( self.c_z + self.c_hidden + self.no_v_points * 4 ) self.linear_out = Linear(concat_out_dim, self.c_s) self.softmax = nn.Softmax(dim=-1) self.softplus = nn.Softplus() def forward( self, s: torch.Tensor, z: Optional[torch.Tensor], r: Rigid, mask: torch.Tensor, _offload_inference: bool = False, _z_reference_list: Optional[Sequence[torch.Tensor]] = None, ) -> torch.Tensor: """ Args: s: [*, N_res, C_s] single representation z: [*, N_res, N_res, C_z] pair representation r: [*, N_res] transformation object mask: [*, N_res] mask Returns: [*, N_res, C_s] single representation update """ z = [z] ####################################### # Generate scalar and point activations ####################################### # [*, N_res, H * C_hidden] q = self.linear_q(s) kv = self.linear_kv(s) # [*, N_res, H, C_hidden] q = q.view(q.shape[:-1] + (self.no_heads, -1)) # [*, N_res, H, 2 * C_hidden] kv = kv.view(kv.shape[:-1] + (self.no_heads, -1)) # [*, N_res, H, C_hidden] k, v = torch.split(kv, self.c_hidden, dim=-1) # [*, N_res, H * P_q * 3] q_pts = self.linear_q_points(s) # [*, N_res, H * P_q, 3] q_pts = torch.split(q_pts, q_pts.shape[-1] // 3, dim=-1) q_pts = torch.stack(q_pts, dim=-1) q_pts = r[..., None].apply(q_pts) # [*, N_res, H, P_q, 3] q_pts = q_pts.view( q_pts.shape[:-2] + (self.no_heads, self.no_qk_points, 3) ) # [*, N_res, H * (P_q + P_v) * 3] kv_pts = 
self.linear_kv_points(s) # [*, N_res, H * (P_q + P_v), 3] kv_pts = torch.split(kv_pts, kv_pts.shape[-1] // 3, dim=-1) kv_pts = torch.stack(kv_pts, dim=-1) kv_pts = r[..., None].apply(kv_pts) # [*, N_res, H, (P_q + P_v), 3] kv_pts = kv_pts.view(kv_pts.shape[:-2] + (self.no_heads, -1, 3)) # [*, N_res, H, P_q/P_v, 3] k_pts, v_pts = torch.split( kv_pts, [self.no_qk_points, self.no_v_points], dim=-2 ) # [*, N_res, N_res, H] b = self.linear_b(z[0]) if(_offload_inference): z[0] = z[0].cpu() # [*, H, N_res, N_res] a = torch.matmul(
permute_final_dims(q, (1, 0, 2)), # [*, H, N_res, C_hidden]
4
2023-11-01 10:29:08+00:00
16k
tylerlight071/Project-Cipher
main.py
[ { "identifier": "clear_terminal", "path": "components/common_functions.py", "snippet": "def clear_terminal():\n os.system('cls' if os.name == 'nt' else 'clear')" }, { "identifier": "print_slow", "path": "components/common_functions.py", "snippet": "def print_slow(text, delay=0.00): # change to 0.01\n for char in text:\n print(char, end='', flush=True)\n time.sleep(delay)\n print()" }, { "identifier": "shop_help", "path": "components/common_functions.py", "snippet": "def shop_help():\n print_slow(Fore.YELLOW + \"Shop Help:\" + Style.RESET_ALL)\n print_slow(\"\")\n print_slow(\"[buy] - Use the 'buy [upgrade]' command to purchase the upgrade in the shop. \")\n print_slow(\"\")\n print_slow(\"[clear] - Use the 'clear' command to clear the terminal.\")\n print_slow(\"\")\n print_slow(\"[exit] - Use the 'exit' command to return to the main terminal.\")\n print_slow(\"\")" }, { "identifier": "help_user", "path": "components/common_functions.py", "snippet": "def help_user():\n print_slow(Fore.MAGENTA + \"Help:\" + Style.RESET_ALL)\n print_slow(\"\")\n print_slow(\"[connect] - Use the 'connect' command to hack into Enigma Corps network.\")\n print_slow(\"\")\n print_slow(\"[mail] - Use the 'mail' command to view and respond to emails from your client and other characters.\")\n print_slow(\"\")\n print_slow(\"[balance] - Use the 'balance' command to view your current earnings which you can spend on upgrades. \")\n print_slow(\"\")\n print_slow(\"[shop] - Use the 'shop' command to view upgrades available in the shop. \")\n print_slow(\"\")\n print_slow(\"[clear] - Use the 'clear' command to clear the terminal.\")\n print_slow(\"\")\n print_slow(\"[help] - Use the 'help' command if you need assistance at any time.\")\n print_slow(\"\")\n print_slow(\"[exit] - Use the 'exit' command to return to the Main Menu.\")\n print_slow(\"\")" }, { "identifier": "connect_help", "path": "components/common_functions.py", "snippet": "def connect_help():\n print_slow(Fore.MAGENTA + \"Connect Help:\" + Style.RESET_ALL)\n print_slow(\n \"[scan] - Use the 'scan' command to scan the network and search for available systems and vulnerabilities.\")\n print_slow(\"\")\n print_slow(\"[hack] - Use the 'hack [system/vulnerability]' to hack into different systems.\")\n print_slow(\"\")\n print_slow(\"[clear] - Use the 'clear' command to clear the terminal.\")\n print_slow(\"\")\n print_slow(\"[disconnect] - Use the 'disconnect' command to disconnect from the current system or vulnerability.\")\n print_slow(\"\")" }, { "identifier": "mail_help", "path": "components/common_functions.py", "snippet": "def mail_help():\n print_slow(Fore.LIGHTBLUE_EX + \"Mail Help:\" + Style.RESET_ALL)\n print_slow(\"\")\n print_slow(\"[l] - Use the 'l' command to list all emails.\")\n print_slow(\"\")\n print_slow(\"[r] - Use the 'r [subject]' command to read an email with the specified subject.\")\n print_slow(\"\")\n print_slow(\"[clear] - Use the 'clear' command to clear the terminal.\")\n print_slow(\"\")\n print_slow(\"[exit] - Use the 'exit' command to return to the main terminal.\")\n print_slow(\"\")" }, { "identifier": "system_help", "path": "components/common_functions.py", "snippet": "def system_help():\n print_slow(\"\")\n print_slow(\"[mail] - Use the 'mail' command to log into the users emails.\")\n print_slow(\"\")\n print_slow(\"[l] - Use the 'l' command to list files in a users system.\")\n print_slow(\"\")\n print_slow(\"[clear] - Use the 'clear' command to clear the terminal.\")\n print_slow(\"\")\n print_slow(\"[r] - Use the 'r 
[file]' command to read files in a users system\")\n print_slow(\"\")" }, { "identifier": "intro_call", "path": "conversations/calls.py", "snippet": "def intro_call():\n clear_terminal()\n\n # Anonymous Sender (Anonymous)\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n Fore.YELLOW + \"Welcome, Cipher. Operation Enigma is our covert mission against Enigma Corp, a powerful and secretive entity.\")\n print_slow(\n \"Your skills and secrecy have brought you to our attention. Your mission is to dig through their systems and servers looking for valuable data.\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Response\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(Fore.BLUE + \"Got it, Anonymous. Exposing secrets and bringing justice. I'm in.\")\n print_slow(\"What's my first move? Talk to me about this 'EnigmaLink'.\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender (Anonymous)\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n \"Excellent, Cipher. EnigmaLink is a specialized tool available on the Hacker's Market. It contains a hidden backdoor, allowing access to Enigma Corps servers.\")\n print_slow(\n \"Your task is to acquire EnigmaLink and initiate your infiltration. Use the 'connect' command to navigate the network and gather crucial intelligence.\")\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Response\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(\n Fore.BLUE + \"EnigmaLink, got it. I'll secure it and initiate the infiltration. What about this employee, Amy?\")\n print_slow(\"You mentioned her password is 'sexinthecity.' What's my objective with her?\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender (Anonymous)\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n \"Good question, Cipher. Amy is a key target. Use her password to access her computer and gather any pertinent information.\")\n print_slow(\n \"This data is vital to our cause. Be thorough and meticulous in your investigation. The success of our operation depends on it.\")\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Response\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(Fore.BLUE + \"Understood, Anonymous. I'll focus on Amy, gather intel, and proceed with precision.\")\n print_slow(\"Consider it done. Anything else I should know before I dive in?\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender (Anonymous)\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n \"One last thing, Cipher. All collected data is highly confidential. This contract is binding, and your success is paramount.\")\n print_slow(\"Execute with diligence, and may the odds be in your favor. 
Good luck, Cipher.\")\n print_slow(Fore.RED + \"Line Disconnected...\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()" }, { "identifier": "first_call", "path": "conversations/calls.py", "snippet": "def first_call():\n clear_terminal()\n print_slow(sender_art)\n print_slow(\"\")\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(Fore.YELLOW + \"That's a good start, but we already have that information.\")\n print_slow(\"Regardless, I've transferred £20 into the account for your troubles.\")\n print_slow(\"Keep digging Cipher!\" + Style.RESET_ALL)\n print_slow(Fore.RED + \"Line Disconnected...\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()" }, { "identifier": "second_call", "path": "conversations/calls.py", "snippet": "def second_call():\n clear_terminal()\n print_slow(sender_art)\n print_slow(\"\")\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n Fore.YELLOW + \"Hey Cipher, you nailed it! 'Billy' just spilled the beans about wanting to climb the corporate ladder into management.\")\n print_slow(\n \"This is gold for us. We can guide 'Billy' toward training and workshops that align with our interests, nudging things in our favor.\")\n print_slow(\n \"Picture it – we're pulling the strings, helping 'Billy' grow, and steering the ship where we want it to go.\")\n print_slow(\"Keep the ball rolling, Cipher!\" + Style.RESET_ALL)\n print_slow(Fore.RED + \"Line Disconnected...\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()" }, { "identifier": "third_call", "path": "conversations/calls.py", "snippet": "def third_call():\n clear_terminal()\n\n # Anonymous Sender\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\"\n \"\")\n print_slow(\n Fore.YELLOW + \"Cipher, we've stumbled upon a perplexing development regarding Enigma's interest in a mysterious 'compound.'\")\n print_slow(\n \"I'm cross-referencing our existing intel to unveil more details. Stay vigilant and be prepared for the unknown.\" + Style.RESET_ALL)\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Response\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(\n Fore.BLUE + \"A compound, huh? Any hints on whether we're talking metal, chemicals, or something else entirely?\")\n print_slow(\"This feels like navigating in the dark. What exactly am I dealing with?\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender Response\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(Fore.YELLOW +\n \"Cipher, we're in the dark too. Initial reports are unclear—could be metal, chemical, or something beyond our comprehension.\")\n print_slow(\n \"Your mission is to identify the nature of this compound. Exercise extreme caution; this goes deeper than we anticipated.\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Inquiry\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(Fore.BLUE + \"So, we're playing 'guess the compound.' 
Any leads, any connections I should explore?\")\n print_slow(\"This is starting to sound like one of those high-stakes puzzles.\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender Clarification\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(Fore.YELLOW +\n \"I wish I had more details, Cipher. This is uncharted territory for us. Investigate discreetly, and trust no one.\")\n print_slow(\n \"I'll attempt to gather more intel. Stay on the line, and keep me updated on any findings.\" + Style.RESET_ALL)\n print_slow(\"\")\n print_slow(Fore.RED + \"Line Disconnected...\" + Style.RESET_ALL)\n input(\"Press [Enter] to continue: \")\n clear_terminal()" }, { "identifier": "fourth_call", "path": "conversations/calls.py", "snippet": "def fourth_call():\n clear_terminal()\n\n # Anonymous Sender\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n Fore.YELLOW + \"Cipher, we've got our hands on an intriguing document – an Employee Performance Review for 'Billy Constantine'.\")\n print_slow(\n \"This could be a goldmine of information. Let's dig in and see if there's anything we can leverage to our advantage.\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Response\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(\n Fore.BLUE + \"An Employee Performance Review? Interesting choice. What's the scoop on 'Billy Constantine'?\")\n print_slow(\"Give me the details, and we'll figure out our next move.\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender Briefing\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n \"Cipher, 'Billy Constantine' is making waves. The review highlights exceptional performance as a sales representative.\")\n print_slow(\n \"He's exceeding sales targets, mentoring new team members, and earning a solid 4.5/5 rating. A rising star, it seems.\")\n print_slow(\"We might use this to our advantage. Let's explore how we can align his ambitions with our agenda.\")\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Strategy\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(\n Fore.BLUE + \"A high-performing sales rep, huh? We could steer 'Billy' towards projects that align with our goals.\")\n print_slow(\"Let's use this performance review to our advantage. Maybe mentorship programs, leadership initiatives?\")\n print_slow(\"I'm ready to play this card strategically. What's the next move?\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender Next Steps\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n \"Great thinking, Cipher. Let's work on a plan to subtly guide 'Billy' toward initiatives that benefit us.\")\n print_slow(\"We'll need to dig deeper into 'Billy's' aspirations and weave our influence seamlessly.\")\n print_slow(\"Stay vigilant, Cipher. 
This could be a game-changer.\")\n print_slow(\"\")\n print_slow(Fore.RED + \"Line Disconnected...\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()" }, { "identifier": "fifth_call", "path": "conversations/calls.py", "snippet": "def fifth_call():\n clear_terminal()\n\n # Anonymous Sender\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n Fore.YELLOW + \"Cipher, we've intercepted some Meeting Minutes dated 24/06/2025. It's related to 'Project X' and involves key players.\")\n print_slow(\n \"This could be our chance to uncover more about Enigma's activities. Let's dive into the details and see what we can extract.\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Response\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(\n Fore.BLUE + \"Meeting Minutes, huh? 'Project X' sounds intriguing. Who were the players involved, and what's the agenda?\")\n print_slow(\"I'm ready to dissect this information and uncover any hidden gems.\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender Briefing\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n \"Cipher, the meeting involved key personnel—Amy, Billy, Kyle, and others. 'Project X' is on the agenda, and there's mention of sensitive materials.\")\n print_slow(\n \"This could be a crucial insight into Enigma's plans. Let's analyze the action items and plan our next move.\")\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Analysis\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(Fore.BLUE + \"'Project X,' sensitive materials, and action items. This is a goldmine of information.\")\n print_slow(\n \"Let's focus on dissecting the action items and see if we can connect the dots. What's our strategy, Anonymous?\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender Next Steps\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n \"Agreed, Cipher. Let's delve into the action items, especially the data compilation and safety protocol training.\")\n print_slow(\"We might uncover more about 'Project X' and gain insights into Enigma's plans.\")\n print_slow(\"Stay sharp, Cipher. 
This could be a pivotal moment in our mission.\")\n print_slow(\"\")\n print_slow(Fore.RED + \"Line Disconnected...\" + Style.RESET_ALL)\n input(\"Press [Enter] to continue: \")\n clear_terminal()" }, { "identifier": "sixth_call", "path": "conversations/calls.py", "snippet": "def sixth_call():\n print_slow(\"ADD CALL STUFF HERE\")" }, { "identifier": "markus_seen_call", "path": "conversations/calls.py", "snippet": "def markus_seen_call():\n print_slow(\"Something goes here\")" }, { "identifier": "code_shatter_call", "path": "conversations/minigame_calls.py", "snippet": "def code_shatter_call():\n clear_terminal()\n print_slow(sender_art)\n print_slow(\"\")\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(Fore.YELLOW + \"I see you have bought CodeShatter!\")\n print_slow(\"This item is a one time use upgrade so once you get the password, it is gone so use wisely!\")\n print_slow(\"But don't threat, if you fail, you get a chance to retry. The item is only used when you get the password, so be sure to write it down!\" + Style.RESET_ALL)\n input(\"Press [Enter] to continue: \")\n clear_terminal()" }, { "identifier": "code_shatter_minigame", "path": "minigames/code_shatter_minigame.py", "snippet": "def code_shatter_minigame():\n # Generate a random 5-digit number\n target = [str(random.randint(1, 9)) for _ in range(5)]\n\n print_slow(\"Welcome to CodeShatter!\")\n print_slow(\"\")\n print_slow(\"Guess the 5-digit number.\")\n print_slow(\"\")\n print_slow(\"The sequence can contain multiple same numbers\")\n print_slow(\"\")\n print_slow(Fore.GREEN + \"Green: Correct digit in correct position.\" + Style.RESET_ALL)\n print_slow(\"\")\n print_slow(Fore.YELLOW + \"Orange: Correct digit in incorrect position.\" + Style.RESET_ALL)\n print_slow(\"\")\n print_slow(Fore.RED + \"Red: Incorrect digit.\" + Style.RESET_ALL)\n print_slow(\"\")\n\n attempts = 0\n while attempts < 7:\n # Get the user's guess\n guess = input(\"Enter your guess: \")\n\n if len(guess) != 5 or not guess.isdigit():\n print_slow(\"Invalid input. Please enter a 5-digit number.\")\n continue\n\n attempts += 1\n\n # Check the guess against the target\n feedback = []\n for i in range(5):\n if guess[i] == target[i]:\n feedback.append(Fore.GREEN + guess[i] + Style.RESET_ALL)\n elif guess[i] in target:\n feedback.append(Fore.YELLOW + guess[i] + Style.RESET_ALL)\n else:\n feedback.append(Fore.RED + guess[i] + Style.RESET_ALL)\n\n print_slow(\"Feedback: \" + \" \".join(feedback))\n\n # Check if the guess is correct\n if guess == \"\".join(target):\n print_slow(Fore.GREEN + \"Access granted.\" + Style.RESET_ALL)\n break\n else:\n print_slow(Fore.RED + \"Access denied. 
Too many attempts.\" + Style.RESET_ALL)\n time.sleep(1)\n print_slow(\"\")\n print_slow(Fore.RED + \"Rebooting CodeShatter with new proxy...\" + Style.RESET_ALL)\n time.sleep(1)\n clear_terminal()\n code_shatter_minigame()" }, { "identifier": "port_scanning", "path": "minigames/eye_spy_minigame.py", "snippet": "def port_scanning():\n num_ports = 10\n open_ports, closed_ports = generate_ports(num_ports)\n attempts = 5\n correct_guesses = 0\n scan_attempts = 2\n\n print_slow(\"Welcome to the Port Scanning minigame!\")\n print_slow(\"\")\n print_slow(f\"Find the open ports in the range 1-{num_ports}.\")\n print_slow(\"\")\n print_slow(f\"You have {attempts} attempts.\")\n print_slow(\"\")\n\n while scan_attempts > 0:\n print_slow(\"\")\n print_slow(f\"\\nYou have {scan_attempts} scan attempts left.\")\n print_slow(\"\")\n start = int(input(\"Enter the start of the range to scan: \"))\n print_slow(\"\")\n end = int(input(\"Enter the end of the range to scan: \"))\n print_slow(\"\")\n\n num_open_ports_in_range = len(open_ports.intersection(range(start, end + 1)))\n print_slow(\"\")\n print_slow(f\"There are {num_open_ports_in_range} open ports in the range {start}-{end}.\")\n\n scan_attempts -= 1\n\n while attempts > 0 and len(open_ports) > 0:\n port = int(input(\"\\nEnter a port number to guess: \"))\n\n if port in open_ports:\n print_slow(Fore.GREEN + \"Port is open!\" + Style.RESET_ALL)\n open_ports.remove(port)\n correct_guesses += 1\n elif port in closed_ports:\n print_slow(Fore.RED + \"Port is closed.\" + Style.RESET_ALL)\n closed_ports.remove(port)\n else:\n print_slow(\"Invalid port number. Please enter a number between 1 and\", num_ports)\n\n attempts -= 1\n\n if len(open_ports) == 0:\n print_slow(\n Fore.GREEN + \"\\nCongratulations! You have successfully found all the open ports and gained access to the camera.\" + Style.RESET_ALL)\n time.sleep(2)\n clear_terminal()\n else:\n print_slow(\n Fore.RED + f\"\\nHack Failed! You found {correct_guesses} out of {len(open_ports) + correct_guesses} open ports.\" + Style.RESET_ALL)\n time.sleep(1)\n clear_terminal()\n port_scanning()" }, { "identifier": "AmySystem", "path": "systems/level_1/amy/amy_system.py", "snippet": "class AmySystem:\n def __init__(self):\n self.files = [\n {\n \"name\": \"return_to_work_form.txt\",\n \"content\": (\n \"Employee Name: _______________\\n\"\n \"Employee ID: ____________\\n\"\n \"Department: _______________\\n\"\n \"Date of Return: ______\\n\\n\"\n \"I, [Employee Name], certify that I have followed the company's \"\n \"guidelines for returning to work after an absence. \"\n \"I understand that it is my responsibility to adhere to all safety \"\n \"protocols and procedures to ensure the health and well-being of my \"\n \"colleagues and myself.\\n\\n\"\n \"I acknowledge that I have completed any necessary training and have \"\n \"been briefed on any updates to the company's policies and procedures. \"\n \"I am aware that I must report any symptoms or exposure to COVID-19 to \"\n \"my supervisor immediately.\\n\\n\"\n \"I am committed to doing my part to maintain a safe and healthy work \"\n \"environment for everyone. I will continue to follow all guidelines \"\n \"and protocols and will cooperate with any additional measures that \"\n \"may be implemented in the future.\\n\\n\"\n \"Signature: [Employee Signature]\\n\"\n \"Date: [Date]\"\n )\n },\n {\n \"name\": \"employee_handbook.txt\",\n \"content\": (\n \"Welcome to Enigma Corps We are thrilled to have you as part of our \"\n \"team. 
This employee handbook has been designed to help you understand \"\n \"our company's policies, procedures, and expectations.\\n\\n\"\n \"Our company is committed to fostering a positive and inclusive work \"\n \"environment where all employees feel valued and supported. We believe \"\n \"in treating everyone with respect and dignity and expect all employees \"\n \"to do the same.\\n\\n\"\n \"In this handbook, you will find information on topics such as:\\n\\n\"\n \"- Code of Conduct\\n\"\n \"- Dress Code\\n\"\n \"- Attendance and Punctuality\\n\"\n \"- Time Off and Leave Policies\\n\"\n \"- Performance Evaluations\\n\"\n \"- Health and Safety\\n\"\n \"- Equal Employment Opportunity\\n\"\n \"- Harassment and Discrimination\\n\\n\"\n \"Please take the time to read through this handbook carefully and \"\n \"familiarize yourself with our policies and procedures. If you have any \"\n \"questions or concerns, do not hesitate to reach out to your supervisor \"\n \"or the HR department.\\n\\n\"\n \"We look forward to working with you and hope you have a long and \"\n \"successful career with Enigma Corps!\"\n )\n },\n {\n \"name\": \"benefits_summary.txt\",\n \"content\": (\n \"At Enigma Corps, we believe in taking care of our employees and \"\n \"offer a comprehensive benefits package to support your health, well-being, \"\n \"and financial security. Below is a summary of the benefits available to \"\n \"you as an employee of Enigma Corps.\\n\\n\"\n \"Health Insurance: We offer a choice of medical, dental, and vision \"\n \"plans to meet your needs. Our plans provide coverage for preventive care, \"\n \"hospitalization, prescription drugs, and more.\\n\\n\"\n \"Retirement Savings: We offer a 401(k) plan with a generous company \"\n \"match to help you save for your future. You can choose from a variety of \"\n \"investment options to suit your needs.\\n\\n\"\n \"Paid Time Off: We provide a generous amount of paid time off, \"\n \"including vacation, sick leave, and holiday pay. We also offer paid \"\n \"parental leave for new parents.\\n\\n\"\n \"Flexible Work Arrangements: We understand the importance of work-life \"\n \"balance and offer flexible work arrangements, such as remote work and \"\n \"flexible schedules, where possible.\\n\\n\"\n \"Wellness Programs: We offer a variety of wellness programs and \"\n \"resources to support your physical and mental health, including fitness \"\n \"classes, stress management programs, and counseling services.\\n\\n\"\n \"Professional Development: We are committed to supporting your growth \"\n \"and development and offer a variety of training and development \"\n \"opportunities, including tuition reimbursement, workshops, and seminars.\"\n \"\\n\\n\"\n \"We encourage you to review this summary carefully and take advantage of \"\n \"the benefits available to you. If you have any questions or need further \"\n \"information, please contact the HR department.\"\n )\n },\n ]\n self.emails = [\n {\n \"sender\": \"Amy\",\n \"subject\": \"Can't Stop Thinking About You\",\n \"body\": (\n \"Hey Billy,\\n\\n\"\n \"I hope this message finds you in good spirits. I've been meaning to write to you for a while now, but I couldn't find the right words to express what I've been feeling.\\n\\n\"\n \"Ever since that night we spent together, I can't seem to get you out of my mind. There's something about the way you make me feel that I've never experienced before. 
\"\n \"\\nIt's exhilarating, yet terrifying all at the same time.\\n\\n\"\n \"I know we both have a lot on our plates right now, and I don't want to add any more stress to your life. But I can't help but wonder what could happen if we gave this a real shot. \"\n \"I know it's complicated, and there are a lot of factors to consider, but I think we owe it to ourselves to explore this connection we have.\\n\\n\"\n \"I understand if you're not ready to take that step, and I don't want to pressure you into anything you're not comfortable with. \"\n \"\\nBut I can't shake the feeling that we could have something truly special together.\\n\\n\"\n \"I'd love to hear your thoughts on this, and I'm more than willing to take things slow if that's what you need. Maybe we could meet up for dinner and talk about it in person?\"\n \" I think it would be easier to have this conversation face-to-face.\\n\\n\"\n \"I hope you're doing well, and I look forward to hearing from you soon.\\n\\n\"\n \"Take care,\\n\"\n \"Amy\"\n )\n },\n {\n \"sender\": \"Amy\",\n \"subject\": \"Need Your Help on the Smith Project\",\n \"body\": (\n \"Hi Billy,\\n\\n\"\n \"I hope this email finds you well. I wanted to reach out and ask for your help on the Smith project. I've been having some trouble with the data analysis portion,\"\n \"\\nand I know you have a lot of experience in that area.\\n\\n\"\n \"The project involves analyzing customer feedback data to identify trends and areas for improvement. I've been working on it for a few weeks now, but I'm finding it challenging to make sense of the data and\"\n \"\\ndraw meaningful conclusions.\\n\\n\"\n \"Would you be available for a quick meeting later this week to go over some of the data with me? I would really appreciate your input and guidance on this. \"\n \"\\nI think your expertise could really help me make progress and ensure the success of the project.\\n\\n\"\n \"If you're available, please let me know your preferred date and time, and I'll send out a calendar invite. I'm flexible and can work around your schedule.\\n\\n\"\n \"Thank you in advance for your help, and I look forward to hearing from you soon.\\n\\n\"\n \"Best,\\n\"\n \"Amy\"\n )\n },\n {\n \"sender\": \"Amy\",\n \"subject\": \"Request for Time Off\",\n \"body\": (\n \"Good Afternoon Katie,\\n\\n\"\n \"I hope this email finds you well. I wanted to request some time off next month for a family vacation. I am planning to be out of the office from 10/09/2024 to 18/09/2024\\n\\n\"\n \"I have been working hard on the Johnson project and have made significant progress. I will make sure to finish up any outstanding work and hand off any ongoing projects to my colleagues before I leave. I will also be available by email in case of any urgent matters.\\n\\n\"\n \"I understand that this is a busy time for the team, and I want to ensure that my absence doesn't cause any disruptions. I have already spoken to Markus and he has kindly agreed to cover for me while I'm away.\\n\\n\"\n \"Thank you for considering my request. I look forward to spending some quality time with my family and coming back to work refreshed and recharged.\"\n \"\\nI am confident that the time off will help me come back with renewed energy and focus.\\n\\n\"\n \"Best,\\n\"\n \"Amy\"\n )\n },\n {\n \"sender\": \"Amy\",\n \"subject\": \"Apology for the Mistake\",\n \"body\": (\n \"Good Morning Kyle,\\n\\n\"\n \"I hope this email finds you well. I wanted to reach out and apologize for the mistake I made on the Johnson report. 
I realize now that I overlooked some important data, and I take full responsibility for it.\\n\\n\"\n \"I have gone back and corrected the report, and I will make sure to double-check my work in the future to avoid any similar mistakes. I have also attached the updated report for your reference.\\n\\n\"\n \"I understand if you are disappointed or frustrated, and I am more than willing to do whatever it takes to make it right. Please let me know if there's anything else I can do to fix this,\"\n \"\\nor if you would like to discuss this further.\\n\\n\"\n \"Once again, I am truly sorry for the mistake, and I appreciate your understanding. I value our working relationship and hope that this incident doesn't tarnish it. I am committed to making amends and ensuring that this doesn't happen again in the future.\\n\\n\"\n \"Best,\\n\"\n \"Amy\"\n )\n },\n {\n \"sender\": \"Amy\",\n \"subject\": \"Thank You for Letting Me Use Your Computer\",\n \"body\": (\n \"Hey Billy,\\n\\n\"\n \"I wanted to take a moment to express my gratitude for allowing me to use your computer while mine was being serviced by IT. \"\n \"It was a huge help and allowed me to stay productive during that time.\\n\\n\"\n \"I also noticed that your password is 'football'. While I understand it's easy to remember, it's important to choose a more secure password to protect your accounts.\"\n \"\\nI would recommend changing it to something more complex and unique. You never know who's watching after all.\\n\\n\"\n \"Thanks again for your generosity and understanding.\\n\\n\"\n \"Best,\\n\"\n \"Amy\"\n )\n }\n ]\n\n def list_files(self):\n print_slow(\"\\nFiles:\")\n for file in self.files:\n print_slow(f\"\\n{file['name']}\")\n\n def read_file(self, file_name):\n file_found = False\n for file in self.files:\n if file['name'] == file_name:\n file_found = True\n return file['content']\n\n if not file_found:\n print_slow(\"\\nNo file found with that name, please try again.\")\n return None\n\n def list_emails(self):\n print_slow(\"\\nEmails:\")\n for i, email in enumerate(self.emails):\n print_slow(f\"\\n{email['subject']} - From: {email['sender']}\")\n\n def read_email(self, subject):\n for email in self.emails:\n if email['subject'].lower() == subject.lower():\n print_slow(f\"\\nFrom: {email['sender']}\\nSubject: {email['subject']}\\n\\n{email['body']}\")\n return\n print_slow(\"\\nNo email found with that subject, please try again.\")" }, { "identifier": "BillySystem", "path": "systems/level_1/billy/billy_system.py", "snippet": "class BillySystem:\n def __init__(self):\n self.files = [\n {\n \"name\": \"cover_letter.txt\",\n \"content\": (\n \"Dear Hiring Manager,\\n\\n\"\n \"I am writing to express my interest in the management position at Enigma Corps. \"\n \"I have been with the company for over 7 years and have consistently demonstrated my commitment to driving excellence and fostering collaboration within the team.\\n\\n\"\n \"During my tenure at Enigma Corps, I have been involved in various projects, including the successful completion of the Q3 deliverables project, where I played a key role in the planning and execution stages. \"\n \"My dedication to achieving project milestones and my ability to work under pressure make me a strong candidate for a management role.\\n\\n\"\n \"I possess strong leadership skills, which I have honed through my experiences in leading teams and coordinating cross-functional efforts. 
\"\n \"My ability to communicate effectively and build positive relationships with team members and stakeholders has resulted in successful project outcomes and increased productivity.\\n\\n\"\n \"In addition to my technical and leadership skills, I am also committed to continuous learning and professional development. \"\n \"I have participated in various training programs and workshops to enhance my management skills and stay up-to-date with industry trends and best practices.\\n\\n\"\n \"I am excited about the opportunity to contribute to the growth and success of Enigma Corps as a member of the management team. \"\n \"I am confident that my skills and experience will be valuable assets to the company, and I look forward to the opportunity to work closely with the team to drive innovation and excellence.\\n\\n\"\n \"Thank you for considering my application. I am looking forward to the opportunity to discuss my qualifications further and explore how I can contribute to the success of Enigma Corps.\\n\\n\"\n \"Sincerely,\\n\"\n \"Billy Constantine\\n\"\n )\n },\n {\n \"name\": \"employee_handbook.txt\",\n \"content\": (\n \"Welcome to Enigma Corps We are thrilled to have you as part of our \"\n \"team. This employee handbook has been designed to help you understand \"\n \"our company's policies, procedures, and expectations.\\n\\n\"\n \"Our company is committed to fostering a positive and inclusive work \"\n \"environment where all employees feel valued and supported. We believe \"\n \"in treating everyone with respect and dignity and expect all employees \"\n \"to do the same.\\n\\n\"\n \"In this handbook, you will find information on topics such as:\\n\\n\"\n \"- Code of Conduct\\n\"\n \"- Dress Code\\n\"\n \"- Attendance and Punctuality\\n\"\n \"- Time Off and Leave Policies\\n\"\n \"- Performance Evaluations\\n\"\n \"- Health and Safety\\n\"\n \"- Equal Employment Opportunity\\n\"\n \"- Harassment and Discrimination\\n\\n\"\n \"Please take the time to read through this handbook carefully and \"\n \"familiarize yourself with our policies and procedures. If you have any \"\n \"questions or concerns, do not hesitate to reach out to your supervisor \"\n \"or the HR department.\\n\\n\"\n \"We look forward to working with you and hope you have a long and \"\n \"successful career with Enigma Corps!\"\n )\n },\n {\n \"name\": \"meeting_minutes.txt\",\n \"content\": (\n \"Meeting Minutes\\n\\n\"\n \"Date: 24/06/2025\\n\"\n \"Location: REDACTED\\n\"\n \"Attendees: Amy, REDACTED, Billy, Kyle, REDACTED, REDACTED, REDACTED\\n\\n\"\n \"Agenda:\\n\"\n \"- Discuss progress on Project REDACTED\\n\"\n \"- Review safety protocols for handling sensitive materials\\n\"\n \"- Plan next steps for research and development\\n\\n\"\n \"Action Items:\\n\"\n \"- Compile data from recent experiments and share with team\\n\"\n \"- Schedule training session on updated safety protocols\\n\"\n \"- Develop timeline for next phase of Project X\\n\\n\"\n \"Next Meeting: 05/08/24, 12:00pm\\n\"\n )\n },\n {\n \"name\": \"employee_performance_review.txt\",\n \"content\": (\n \"Employee Performance Review\\n\\n\"\n \"Employee Name: Billy Constantine\\n\"\n \"Employee ID: 035854\\n\"\n \"Review Date: 28/06/2024\\n\\n\"\n \"Performance Summary:\\n\"\n \"Billy has demonstrated exceptional performance in his role as a sales representative. 
He has consistently exceeded sales targets, built strong relationships with clients, and demonstrated leadership qualities in team meetings and projects.\\n\\n\"\n \"Strengths:\\n\"\n \"- Exceeded quarterly sales targets by 15%.\\n\"\n \"- Successfully onboarded and mentored two new team members.\\n\"\n \"- Demonstrated excellent communication and negotiation skills.\\n\\n\"\n \"Areas for Improvement:\\n\"\n \"- Time management skills can be further developed to ensure all tasks are completed in a timely manner.\\n\"\n \"- Continued development of technical knowledge to stay up-to-date with industry trends.\\n\"\n \"- Strengthen collaboration with cross-functional teams to drive more integrated solutions.\\n\\n\"\n \"Goals for Next Review Period:\\n\"\n \"- Increase sales targets by 20%.\\n\"\n \"- Complete a management training program.\\n\"\n \"- Improve time management skills through prioritization and delegation.\\n\\n\"\n \"Overall Rating: 4.5/5\\n\"\n \"Reviewer Name: Katie Thompson\\n\"\n \"Reviewer Signature: Katie Thompson\\n\"\n \"Date: 28/06/2024\\n\"\n )\n }\n ]\n self.emails = [\n\n {\n \"sender\": \"Billy\",\n \"subject\": \"Re: Need Your Help on the Smith Project\",\n \"body\": (\n \"Hi Amy,\\n\\n\"\n \"I hope this message finds you in great spirits! I'm more than happy to lend a helping hand with the Smith project. After all, two heads are better than one, especially when it comes to data analysis, right?\\n\\n\"\n \"How about we grab a coffee and chat about the project in person? I think it would be nice to catch up and discuss the data over a cup of joe. I'm sure we can brainstorm some ideas and come up with a game plan together.\\n\\n\"\n \"I'm free [date] at [time], does that work for you? If not, just let me know your availability, and we can find a time that suits us both. I'm really looking forward to our coffee date and tackling the project together.\\n\\n\"\n \"Can't wait to see you and dive into the data!\\n\\n\"\n \"Best,\\n\"\n \"Billy\"\n )\n },\n {\n \"sender\": \"Billy\",\n \"subject\": \"Project Update\",\n \"body\": (\n \"Hello Team,\\n\\n\"\n \"I wanted to provide everyone with a quick update on our progress with the Q3 deliverables project. We've successfully completed the initial research phase and are now moving into the planning stage.\\n\\n\"\n \"In our last meeting, we discussed the following key points:\\n\"\n \"- Compound Analysis: We've identified a unique compound with potential applications in various industries. Further testing and analysis are required to unlock its full potential.\\n\"\n \"- Resource Management: We've allocated a special team and dedicated resources to handle the delicate nature of this project, ensuring utmost confidentiality and security.\\n\"\n \"- Safety Protocols: We've developed strict safety protocols to handle the compound, and we're conducting regular training sessions to ensure compliance.\\n\\n\"\n \"Our next steps include finalizing the project plan, assigning tasks to team members, and setting deadlines. I would appreciate input and feedback from all team members to ensure we're on the right track. Please review the attached project plan document for more details.\\n\\n\"\n \"Additionally, I want to remind everyone of the confidential nature of this project. It's imperative that we maintain discretion and follow all security protocols to safeguard our work. 
Let's work together to make this project a success and uphold the company's reputation for innovation and excellence.\\n\\n\"\n \"If you have any questions or concerns, please don't hesitate to reach out. Your cooperation and commitment to this project are greatly appreciated.\\n\\n\"\n \"Best regards,\\n\"\n \"Billy\"\n )\n },\n {\n \"sender\": \"Billy\",\n \"subject\": \"Re: Can't Stop Thinking About You\",\n \"body\": (\n \"Hey there, Amy,\\n\\n\"\n \"Wow, your message really caught me by surprise! But in the best way possible, of course. I've been trying to play it cool, but I have to admit, I've been thinking about that night a lot too. There was just something electric in the air, wasn't there?\\n\\n\"\n \"I've been tossing and turning, wondering if I should reach out to you or if I should wait for you to make the first move. I guess you beat me to it, and I'm glad you did. It's like you read my mind.\\n\\n\"\n \"I can't deny that there's a certain chemistry between us, and I'm intrigued to see where it could lead. I agree that our lives are complicated, and we don't want to add more stress to each other's plates. But sometimes, taking a risk is what makes life exciting, don't you think?\\n\\n\"\n \"I don't want to rush things or make you feel pressured in any way. I'm more than happy to take things slow and let them unfold naturally. But I can't help but imagine the possibilities if we give this a real shot. We could have something truly special, and I don't want to let that pass us by.\\n\\n\"\n \"How about we meet up for dinner and drinks next week? We can talk about it more and see where the night takes us. I think it would be a fun and relaxed way to get to know each other better and explore this connection we have. What do you say?\\n\\n\"\n \"I hope you're doing well, and I'm eagerly awaiting your reply. Until then, I'll be daydreaming about our next encounter.\\n\\n\"\n \"Take care, and talk to you soon.\\n\\n\"\n \"Yours truly,\\n\"\n \"Billy\"\n )\n },\n {\n \"sender\": \"Billy\",\n \"subject\": \"Re: Thank You for Letting Me Use Your Computer\",\n \"body\": (\n \"Hey Amy,\\n\\n\"\n \"No problem at all! I'm always here to help out when I can. It's what teammates do, right?\\n\\n\"\n \"Oh, and about the password thing – haha, I know it's not the most secure choice. I've been meaning to change it, but I guess old habits die hard, right? \"\n \"Thanks for looking out for me though! I'll try to come up with something a bit more creative next time.\\n\\n\"\n \"If you ever need anything else, just give me a shout. Happy to help!\\n\\n\"\n \"Take care,\\n\"\n \"Billy\"\n )\n },\n {\n \"sender\": \"Billy\",\n \"subject\": \"Professional Development\",\n \"body\": (\n \"Good Evening Katie,\\n\\n\"\n \"I hope this email finds you well. I'm reaching out to express my interest in professional development opportunities within the company, particularly in the area of management and leadership.\\n\\n\"\n \"I've been with the company for several years now, and I've had the chance to work on various projects and collaborate with different teams. I'm keen to build on this experience and take on more responsibility, and I believe that acquiring the necessary skills for a management role would be a great next step in my career.\\n\\n\"\n \"Could you please provide information on available training programs, workshops, or seminars that focus on leadership development and management skills? 
I'm particularly interested in areas such as team leadership, strategic planning, conflict resolution, and decision-making.\\n\\n\"\n \"Additionally, if there are any tuition reimbursement programs or resources for management training and certification, I'd like to learn more about them. I'm committed to investing time and effort in my professional growth and believe that these opportunities would greatly benefit both myself and the company.\\n\\n\"\n \"Your guidance and assistance in exploring these options would be greatly appreciated. I look forward to your response and any recommendations you may have.\\n\\n\"\n \"Thank you for your support, and I'm excited about the prospect of contributing to the company's success in a management role.\\n\\n\"\n \"Best regards,\\n\"\n \"Billy\"\n )\n }\n ]\n\n def list_files(self):\n print_slow(\"\\nFiles:\")\n for file in self.files:\n print_slow(f\"\\n{file['name']}\")\n\n def read_file(self, file_name):\n file_found = False\n for file in self.files:\n if file['name'] == file_name:\n file_found = True\n return file['content']\n\n if not file_found:\n print_slow(\"\\nNo file found with that name, please try again.\")\n return None\n\n def list_emails(self):\n print_slow(\"\\nEmails:\")\n for i, email in enumerate(self.emails):\n print_slow(f\"\\n{email['subject']} - From: {email['sender']}\")\n\n def read_email(self, subject):\n for email in self.emails:\n if email['subject'].lower() == subject.lower():\n print_slow(f\"\\nFrom: {email['sender']}\\nSubject: {email['subject']}\\n\\n{email['body']}\")\n return\n print_slow(\"\\nNo email found with that subject, please try again.\")" }, { "identifier": "camera_first", "path": "systems/level_1/cameras/camera_1.py", "snippet": "def camera_first():\n print(camera_1)\n print()\n print()\n move = input(Fore.GREEN + \"> \" + Style.RESET_ALL)\n\n if move.lower() == \"forward\":\n clear_terminal()\n camera_second()\n elif move.lower() == \"back\":\n print(Fore.RED + \"There is nothing to go back to...\" + Style.RESET_ALL)\n time.sleep(2)\n clear_terminal()\n camera_first()" }, { "identifier": "MarkusSystem", "path": "systems/level_1/markus/markus_system.py", "snippet": "class MarkusSystem:\n def __init__(self):\n self.files = [\n {\n \"name\": \"system_log.txt\",\n \"content\": (\n \"Enigma Corps System Log\\n\\n\"\n \"Date: 2023-11-16 08:00 AM\\n\"\n \"Event Type: System Startup\\n\"\n \"Description: The Enigma Corps systems smoothly initiated startup procedures, ensuring a seamless beginning to the workday.\\n\\n\"\n \"Date: 2023-11-16 10:30 AM\\n\"\n \"Event Type: Network Upgrade\\n\"\n \"Description: Implemented a network upgrade to enhance data transfer speeds, providing improved efficiency across departments.\\n\\n\"\n \"Date: 2023-11-16 01:45 PM\\n\"\n \"Event Type: Security Patch Applied\\n\"\n \"Description: Critical security patch successfully applied to safeguard against potential vulnerabilities, ensuring system integrity.\\n\\n\"\n \"Date: 2023-11-16 04:20 PM\\n\"\n \"Event Type: Server Maintenance\\n\"\n \"Description: Conducted routine maintenance on Enigma Corps servers, optimizing performance and minimizing downtime.\\n\\n\"\n \"This dynamic system log captures key events, from the smooth startup of the day to network upgrades, security enhancements, and routine maintenance. 
It serves as a valuable record for troubleshooting and analysis, ensuring the optimal functionality of Enigma Corps systems.\"\n )\n },\n {\n \"name\": \"technical_documentation.docx\",\n \"content\": (\n \"Enigma Corps System Technical Documentation\\n\\n\"\n \"1. System Architecture:\\n\"\n \" - Overview of the system's structural design and components.\\n\\n\"\n \"2. Network Configuration:\\n\"\n \" - Details on the configuration of Enigma Corps' network setup for efficient communication.\\n\\n\"\n \"3. Security Protocols:\\n\"\n \" - Comprehensive overview of security measures and protocols implemented to safeguard sensitive data.\\n\\n\"\n \"4. Troubleshooting Guide:\\n\"\n \" - Step-by-step guide for identifying and resolving common issues to ensure seamless system functionality.\\n\\n\"\n \"5. Software Installation Procedures:\\n\"\n \" - Instructions for installing and updating software components within the Enigma Corps system.\\n\\n\"\n \"6. Hardware Specifications:\\n\"\n \" - Detailed specifications of the hardware components utilized in the Enigma Corps infrastructure.\\n\\n\"\n \"This meticulously crafted technical documentation serves as a go-to resource for understanding the Enigma Corps system, covering everything from its architecture and network configuration to security protocols, troubleshooting, and hardware specifications. It's an invaluable reference for maintaining optimal system performance.\"\n )\n },\n {\n \"name\": \"passwords.txt\",\n \"content\": (\n \"Sensitive Password Information for Enigma Corps\\n\\n\"\n \"Admin Password: *********\\n\"\n \"Database Password: *********\\n\"\n \"Router Password: *********\\n\"\n \"WiFi Password: *********\\n\"\n \"Encryption Key: *********\\n\\n\"\n \"Warning: This file contains confidential information. Keep it secure, and refrain from sharing passwords without explicit authorization. Safeguarding this information is crucial to maintaining the security and integrity of the Enigma Corps systems.\"\n )\n },\n {\n \"name\": \"software_inventory.csv\",\n \"content\": (\n \"Software Inventory for Enigma Corps\\n\\n\"\n \"Software Name, Version, License Key\\n\"\n \"1. Enigma Security Suite, v2.0, X1Y2Z3A4-B5C6D7E8-F9G0H1I2\\n\"\n \"2. DataGuard Backup, v1.5, Y3X2W1V0-U9T8S7R6-Q5P4O3N2\\n\"\n \"3. Office Suite, v2022, Z9Z8Z7Z6-Z5Z4Z3Z2-Z1Z0Z9Z8-Z7Z6Z5\\n\"\n \"4. VPN Client, v3.1, W6W5W4W3-W2W1W0-W9W8W7-W6W5W4\\n\"\n \"5. Project Management Tool, v4.2, VV8V7V6V5-V4V3V2V1-V0V9V8V7-V6V5V4\\n\\n\"\n \"Important: This inventory is crucial for tracking and managing software across Enigma Corps systems. The provided license keys are randomized for security reasons. Handle this information responsibly, and ensure it is only accessible to authorized personnel to maintain the security and compliance of our software assets.\"\n )\n }\n ]\n self.emails = [\n # Email to Management\n {\n \"sender\": \"Markus\",\n \"subject\": \"System Maintenance Scheduled\",\n \"body\": (\n \"Dear Michael,\\n\\n\"\n \"I hope this email finds you well. We wanted to inform you that we have scheduled a system maintenance session for the upcoming weekend to ensure the optimal performance and security of our systems.\\n\\n\"\n \"Maintenance Details:\\n\"\n \"- Date: 16/12/23 - 17/12/23\\n\"\n \"- Time: 3:00pm\\n\"\n \"- Duration: 1 Hour\\n\"\n \"- Impact: No impact expected\\n\\n\"\n \"During this period, there might be temporary disruptions in certain services. Our team will be working diligently to minimize any inconvenience. 
If you have any concerns or specific considerations, please feel free to reach out to us.\\n\\n\"\n \"Thank you for your understanding and cooperation.\\n\\n\"\n \"Best regards,\\n\"\n \"IT Department\"\n )\n },\n {\n # Email to Employees\n \"sender\": \"Markus\",\n \"subject\": \"Upcoming Software Update\",\n \"body\": (\n \"Good afternoon, Kyle,\\n\\n\"\n \"We hope you're doing well. Our IT team is excited to inform you about an upcoming software update that will enhance the functionality and security of our systems. The update is scheduled for [Date] at [Time]. Please take note of the following details:\\n\\n\"\n \"- Expected Duration: Two Days\\n\"\n \"- Action Required: As this will be processed during the weekend, no action is required.\\n\"\n \"- Impact: While we anticipate minimal impact on your day-to-day activities, it's essential to be aware of any potential changes. These include: New UI to navigate, logging in or logging out issues.\\n\\n\"\n \"We recommend saving your work and logging out of your system before the update. If you encounter any issues post-update, don't hesitate to contact our IT support team for assistance.\\n\\n\"\n \"Thank you for your cooperation and understanding.\\n\\n\"\n \"Best regards,\\n\"\n \"IT Support Team\"\n )\n },\n # Email from Markus to Billy\n {\n \"sender\": \"Markus\",\n \"subject\": \"Urgent: Password Security Update Required\",\n \"body\": (\n \"Billy,\\n\\n\"\n \"I hope this email finds you well. I wanted to bring to your attention the importance of updating your current password. This is not the first time I've raised this concern, and I want to emphasize its critical nature.\\n\\n\"\n \"In recent security assessments, it has been flagged that your current password might not meet the latest security standards. To ensure the safety of your account and our overall cybersecurity, it is imperative that you change your password promptly.\\n\\n\"\n \"I understand that these reminders may seem repetitive, but they stem from a genuine concern for the security of your account and our collective responsibility in maintaining a robust cybersecurity posture.\\n\\n\"\n \"Please take a moment at your earliest convenience to update your password. If you encounter any issues or have questions, feel free to reach out. Your cooperation is greatly appreciated.\\n\\n\"\n \"Best regards,\\n\"\n \"Markus, Security Team\"\n )\n }\n\n ]\n\n def list_files(self):\n print_slow(\"\\nFiles:\")\n for file in self.files:\n print_slow(f\"\\n{file['name']}\")\n\n def read_file(self, file_name):\n file_found = False\n for file in self.files:\n if file['name'] == file_name:\n file_found = True\n return file['content']\n\n if not file_found:\n print_slow(\"\\nNo file found with that name, please try again.\")\n return None\n\n def list_emails(self):\n print_slow(\"\\nEmails:\")\n for i, email in enumerate(self.emails):\n print_slow(f\"\\n{email['subject']} - From: {email['sender']}\")\n\n def read_email(self, subject):\n for email in self.emails:\n if email['subject'].lower() == subject.lower():\n print_slow(f\"\\nFrom: {email['sender']}\\nSubject: {email['subject']}\\n\\n{email['body']}\")\n return\n print_slow(\"\\nNo email found with that subject, please try again.\")" } ]
import msvcrt import os import pickle import sys import time import colorama import pygame from colorama import Fore, Style from components.common_functions import clear_terminal, print_slow, shop_help, help_user, connect_help, mail_help, \ system_help from conversations.calls import intro_call, first_call, second_call, third_call, fourth_call, fifth_call, sixth_call, \ markus_seen_call from conversations.minigame_calls import code_shatter_call from minigames.code_shatter_minigame import code_shatter_minigame from minigames.eye_spy_minigame import port_scanning from systems.level_1.amy.amy_system import AmySystem from systems.level_1.billy.billy_system import BillySystem from systems.level_1.cameras.camera_1 import camera_first from systems.level_1.markus.markus_system import MarkusSystem
13,730
# Set the PYGAME_HIDE_SUPPORT_PROMPT environment variable os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "1" # Initialize pygame mixer pygame.mixer.init() # Load the bg music file and loop it pygame.mixer.music.load('bg_music.mp3') pygame.mixer.music.play(-1) # sets the volume to 20% (change value to adjust) pygame.mixer.music.set_volume(0.2) # Define the global variables at the module level inventory = [] balance = 300 emails = [] has_read_email = False has_read_file = False has_intro_call = False seen_markus = False evidence = [] amy_system = AmySystem() billy_system = BillySystem()
# Set the PYGAME_HIDE_SUPPORT_PROMPT environment variable os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "1" # Initialize pygame mixer pygame.mixer.init() # Load the bg music file and loop it pygame.mixer.music.load('bg_music.mp3') pygame.mixer.music.play(-1) # sets the volume to 20% (change value to adjust) pygame.mixer.music.set_volume(0.2) # Define the global variables at the module level inventory = [] balance = 300 emails = [] has_read_email = False has_read_file = False has_intro_call = False seen_markus = False evidence = [] amy_system = AmySystem() billy_system = BillySystem()
markus_system = MarkusSystem()
21
2023-11-06 09:52:13+00:00
16k
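Each record above pairs a module prefix (the import_statement plus cropped_code fields) with the single gold continuation stored in next_line — here `markus_system = MarkusSystem()` — alongside token_num, gold_snippet_index, created_at and level metadata. As a minimal, illustrative sketch of how such records could be consumed for evaluation, the snippet below computes an exact-match score over JSON Lines records; the file name completion_records.jsonl, the exact-match metric, and the predict_next_line callable are assumptions for illustration only, not part of the dataset.

# Illustrative sketch only: field names match the records above; the file path and
# the predict_next_line callable are hypothetical placeholders.
import json
from typing import Callable


def exact_match_score(path: str, predict_next_line: Callable[[str, str], str]) -> float:
    """Fraction of records whose prediction equals the gold ``next_line`` field."""
    total = correct = 0
    with open(path, encoding="utf-8") as fh:
        for raw in fh:
            record = json.loads(raw)
            # The visible prefix is import_statement + cropped_code; next_line is the gold answer.
            prediction = predict_next_line(record["import_statement"], record["cropped_code"])
            total += 1
            correct += int(prediction.strip() == record["next_line"].strip())
    return correct / total if total else 0.0


# Example usage: a trivial baseline that always predicts an empty line.
if __name__ == "__main__":
    print(exact_match_score("completion_records.jsonl", lambda imports, code: ""))

A fuller harness would likely also use gold_snippet_index to check whether a retrieval step picked the context snippet the gold line depends on, but that is left out of this sketch.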
Codra-Ingenierie-Informatique/DataLab
cdl/core/computation/signal.py
[ { "identifier": "fit", "path": "cdl/algorithms/fit.py", "snippet": "class FitModel(abc.ABC):\nclass GaussianModel(FitModel):\nclass LorentzianModel(FitModel):\nclass VoigtModel(FitModel):\n def func(cls, x, amp, sigma, x0, y0):\n def get_amp_from_amplitude(\n cls, amplitude, sigma\n ): # pylint: disable=unused-argument\n def amplitude(cls, amp, sigma):\n def fwhm(cls, amp, sigma):\n def half_max_segment(cls, amp, sigma, x0, y0):\n def func(cls, x, amp, sigma, x0, y0):\n def get_amp_from_amplitude(cls, amplitude, sigma):\n def amplitude(cls, amp, sigma):\n def fwhm(cls, amp, sigma):\n def func(cls, x, amp, sigma, x0, y0):\n def get_amp_from_amplitude(cls, amplitude, sigma):\n def amplitude(cls, amp, sigma):\n def fwhm(cls, amp, sigma):\n def func(cls, x, amp, sigma, x0, y0):\n def fwhm(cls, amp, sigma):" }, { "identifier": "derivative", "path": "cdl/algorithms/signal.py", "snippet": "def derivative(x: np.ndarray, y: np.ndarray) -> np.ndarray:\n \"\"\"Compute numerical derivative.\n\n Args:\n x (numpy.ndarray): X data\n y (numpy.ndarray): Y data\n\n Returns:\n np.ndarray: Numerical derivative\n \"\"\"\n dy = np.zeros_like(y)\n dy[0:-1] = np.diff(y) / np.diff(x)\n dy[-1] = (y[-1] - y[-2]) / (x[-1] - x[-2])\n return dy" }, { "identifier": "interpolate", "path": "cdl/algorithms/signal.py", "snippet": "def interpolate(\n x: np.ndarray,\n y: np.ndarray,\n xnew: np.ndarray,\n method: str,\n fill_value: float | None = None,\n) -> np.ndarray:\n \"\"\"Interpolate data.\n\n Args:\n x (numpy.ndarray): X data\n y (numpy.ndarray): Y data\n xnew (numpy.ndarray): New X data\n method (str): Interpolation method. Valid values are 'linear', 'spline',\n 'quadratic', 'cubic', 'barycentric', 'pchip'\n fill_value (float | None): Fill value. Defaults to None.\n This value is used to fill in for requested points outside of the\n X data range. 
It is only used if the method argument is 'linear',\n 'cubic' or 'pchip'.\n \"\"\"\n interpolator_extrap = None\n if method == \"linear\":\n # Linear interpolation using NumPy's interp function:\n ynew = np.interp(xnew, x, y, left=fill_value, right=fill_value)\n elif method == \"spline\":\n # Spline using 1-D interpolation with SciPy's interpolate package:\n knots, coeffs, degree = scipy.interpolate.splrep(x, y, s=0)\n ynew = scipy.interpolate.splev(xnew, (knots, coeffs, degree), der=0)\n elif method == \"quadratic\":\n # Quadratic interpolation using NumPy's polyval function:\n coeffs = np.polyfit(x, y, 2)\n ynew = np.polyval(coeffs, xnew)\n elif method == \"cubic\":\n # Cubic interpolation using SciPy's Akima1DInterpolator class:\n interpolator_extrap = scipy.interpolate.Akima1DInterpolator(x, y)\n elif method == \"barycentric\":\n # Barycentric interpolation using SciPy's BarycentricInterpolator class:\n interpolator = scipy.interpolate.BarycentricInterpolator(x, y)\n ynew = interpolator(xnew)\n elif method == \"pchip\":\n # PCHIP interpolation using SciPy's PchipInterpolator class:\n interpolator_extrap = scipy.interpolate.PchipInterpolator(x, y)\n else:\n raise ValueError(f\"Invalid interpolation method {method}\")\n if interpolator_extrap is not None:\n ynew = interpolator_extrap(xnew, extrapolate=fill_value is None)\n if fill_value is not None:\n ynew[xnew < x[0]] = fill_value\n ynew[xnew > x[-1]] = fill_value\n return ynew" }, { "identifier": "moving_average", "path": "cdl/algorithms/signal.py", "snippet": "def moving_average(y: np.ndarray, n: int) -> np.ndarray:\n \"\"\"Compute moving average.\n\n Args:\n y (numpy.ndarray): Input array\n n (int): Window size\n\n Returns:\n np.ndarray: Moving average\n \"\"\"\n y_padded = np.pad(y, (n // 2, n - 1 - n // 2), mode=\"edge\")\n return np.convolve(y_padded, np.ones((n,)) / n, mode=\"valid\")" }, { "identifier": "normalize", "path": "cdl/algorithms/signal.py", "snippet": "def normalize(yin: np.ndarray, parameter: str = \"maximum\") -> np.ndarray:\n \"\"\"Normalize input array to a given parameter.\n\n Args:\n yin (numpy.ndarray): Input array\n parameter (str | None): Normalization parameter. Defaults to \"maximum\".\n Supported values: 'maximum', 'amplitude', 'sum', 'energy'\n\n Returns:\n np.ndarray: Normalized array\n \"\"\"\n axis = len(yin.shape) - 1\n if parameter == \"maximum\":\n maximum = np.max(yin, axis)\n if axis == 1:\n maximum = maximum.reshape((len(maximum), 1))\n maxarray = np.tile(maximum, yin.shape[axis]).reshape(yin.shape)\n return yin / maxarray\n if parameter == \"amplitude\":\n ytemp = np.array(yin, copy=True)\n minimum = np.min(yin, axis)\n if axis == 1:\n minimum = minimum.reshape((len(minimum), 1))\n ytemp -= minimum\n return normalize(ytemp, parameter=\"maximum\")\n if parameter == \"sum\":\n return yin / yin.sum()\n if parameter == \"energy\":\n return yin / (yin * yin.conjugate()).sum()\n raise RuntimeError(f\"Unsupported parameter {parameter}\")" }, { "identifier": "peak_indexes", "path": "cdl/algorithms/signal.py", "snippet": "def peak_indexes(\n y, thres: float = 0.3, min_dist: int = 1, thres_abs: bool = False\n) -> np.ndarray:\n # Copyright (c) 2014 Lucas Hermann Negri\n # Unmodified code snippet from PeakUtils 1.3.0\n \"\"\"Peak detection routine.\n\n Finds the numeric index of the peaks in *y* by taking its first order\n difference. By using *thres* and *min_dist* parameters, it is possible\n to reduce the number of detected peaks. 
*y* must be signed.\n\n Parameters\n ----------\n y : ndarray (signed)\n 1D amplitude data to search for peaks.\n thres : float between [0., 1.]\n Normalized threshold. Only the peaks with amplitude higher than the\n threshold will be detected.\n min_dist : int\n Minimum distance between each detected peak. The peak with the highest\n amplitude is preferred to satisfy this constraint.\n thres_abs: boolean\n If True, the thres value will be interpreted as an absolute value,\n instead of a normalized threshold.\n\n Returns\n -------\n ndarray\n Array containing the numeric indexes of the peaks that were detected\n \"\"\"\n if isinstance(y, np.ndarray) and np.issubdtype(y.dtype, np.unsignedinteger):\n raise ValueError(\"y must be signed\")\n\n if not thres_abs:\n thres = thres * (np.max(y) - np.min(y)) + np.min(y)\n\n # compute first order difference\n dy = np.diff(y)\n\n # propagate left and right values successively to fill all plateau pixels\n # (0-value)\n (zeros,) = np.where(dy == 0)\n\n # check if the signal is totally flat\n if len(zeros) == len(y) - 1:\n return np.array([])\n\n if len(zeros):\n # compute first order difference of zero indexes\n zeros_diff = np.diff(zeros)\n # check when zeros are not chained together\n (zeros_diff_not_one,) = np.add(np.where(zeros_diff != 1), 1)\n # make an array of the chained zero indexes\n zero_plateaus = np.split(zeros, zeros_diff_not_one)\n\n # fix if leftmost value in dy is zero\n if zero_plateaus[0][0] == 0:\n dy[zero_plateaus[0]] = dy[zero_plateaus[0][-1] + 1]\n zero_plateaus.pop(0)\n\n # fix if rightmost value of dy is zero\n if len(zero_plateaus) > 0 and zero_plateaus[-1][-1] == len(dy) - 1:\n dy[zero_plateaus[-1]] = dy[zero_plateaus[-1][0] - 1]\n zero_plateaus.pop(-1)\n\n # for each chain of zero indexes\n for plateau in zero_plateaus:\n median = np.median(plateau)\n # set leftmost values to leftmost non zero values\n dy[plateau[plateau < median]] = dy[plateau[0] - 1]\n # set rightmost and middle values to rightmost non zero values\n dy[plateau[plateau >= median]] = dy[plateau[-1] + 1]\n\n # find the peaks by using the first order difference\n peaks = np.where(\n (np.hstack([dy, 0.0]) < 0.0)\n & (np.hstack([0.0, dy]) > 0.0)\n & (np.greater(y, thres))\n )[0]\n\n # handle multiple peaks, respecting the minimum distance\n if peaks.size > 1 and min_dist > 1:\n highest = peaks[np.argsort(y[peaks])][::-1]\n rem = np.ones(y.size, dtype=bool)\n rem[peaks] = False\n\n for peak in highest:\n if not rem[peak]:\n sl = slice(max(0, peak - min_dist), peak + min_dist + 1)\n rem[sl] = True\n rem[peak] = False\n\n peaks = np.arange(y.size)[~rem]\n\n return peaks" }, { "identifier": "xpeak", "path": "cdl/algorithms/signal.py", "snippet": "def xpeak(x: np.ndarray, y: np.ndarray) -> float:\n \"\"\"Return default peak X-position (assuming a single peak).\n\n Args:\n x (numpy.ndarray): X data\n y (numpy.ndarray): Y data\n\n Returns:\n float: Peak X-position\n \"\"\"\n peaks = peak_indexes(y)\n if peaks.size == 1:\n return x[peaks[0]]\n return np.average(x, weights=y)" }, { "identifier": "xy_fft", "path": "cdl/algorithms/signal.py", "snippet": "def xy_fft(\n x: np.ndarray, y: np.ndarray, shift: bool = True\n) -> tuple[np.ndarray, np.ndarray]:\n \"\"\"Compute FFT on X,Y data.\n\n Args:\n x (numpy.ndarray): X data\n y (numpy.ndarray): Y data\n shift (bool | None): Shift the zero frequency to the center of the spectrum.\n Defaults to True.\n\n Returns:\n tuple[np.ndarray, np.ndarray]: X,Y data\n \"\"\"\n y1 = np.fft.fft(y)\n x1 = np.fft.fftfreq(x.shape[-1], d=x[1] - 
x[0])\n if shift:\n x1 = np.fft.fftshift(x1)\n y1 = np.fft.fftshift(y1)\n return x1, y1" }, { "identifier": "xy_ifft", "path": "cdl/algorithms/signal.py", "snippet": "def xy_ifft(\n x: np.ndarray, y: np.ndarray, shift: bool = True\n) -> tuple[np.ndarray, np.ndarray]:\n \"\"\"Compute iFFT on X,Y data.\n\n Args:\n x (numpy.ndarray): X data\n y (numpy.ndarray): Y data\n shift (bool | None): Shift the zero frequency to the center of the spectrum.\n Defaults to True.\n\n Returns:\n tuple[np.ndarray, np.ndarray]: X,Y data\n \"\"\"\n x1 = np.fft.fftfreq(x.shape[-1], d=x[1] - x[0])\n if shift:\n x1 = np.fft.ifftshift(x1)\n y = np.fft.ifftshift(y)\n y1 = np.fft.ifft(y)\n return x1, y1.real" }, { "identifier": "_", "path": "cdl/config.py", "snippet": "CONF_VERSION = \"1.0.0\"\nAPP_NAME = \"DataLab\"\nMOD_NAME = \"cdl\"\nAPP_DESC = _(\"\"\"DataLab is a generic signal and image processing platform\"\"\")\nAPP_PATH = osp.dirname(__file__)\nDEBUG = os.environ.get(\"DEBUG\", \"\").lower() in (\"1\", \"true\")\nTEST_SEGFAULT_ERROR = len(os.environ.get(\"TEST_SEGFAULT_ERROR\", \"\")) > 0\nDATETIME_FORMAT = \"%d/%m/%Y - %H:%M:%S\"\nDATAPATH = configtools.get_module_data_path(MOD_NAME, \"data\")\nSHOTPATH = osp.join(\n configtools.get_module_data_path(MOD_NAME), os.pardir, \"doc\", \"images\", \"shots\"\n)\nOTHER_PLUGINS_PATHLIST = [configtools.get_module_data_path(MOD_NAME, \"plugins\")]\nIS_FROZEN = is_frozen(MOD_NAME)\nPLOTPY_DEFAULTS = {\n \"plot\": {\n # \"antialiasing\": False,\n # \"title/font/size\": 12,\n # \"title/font/bold\": False,\n # \"marker/curve/text/font/size\": 8,\n # \"marker/curve/text/font/family\": \"default\",\n # \"marker/curve/text/font/bold\": False,\n # \"marker/curve/text/font/italic\": False,\n \"marker/curve/text/textcolor\": \"black\",\n # \"marker/curve/text/background_color\": \"#ffffff\",\n # \"marker/curve/text/background_alpha\": 0.8,\n # \"marker/cross/text/font/family\": \"default\",\n # \"marker/cross/text/font/size\": 8,\n # \"marker/cross/text/font/bold\": False,\n # \"marker/cross/text/font/italic\": False,\n \"marker/cross/text/textcolor\": \"black\",\n # \"marker/cross/text/background_color\": \"#ffffff\",\n \"marker/cross/text/background_alpha\": 0.7,\n # \"marker/cross/line/style\": \"DashLine\",\n # \"marker/cross/line/color\": \"yellow\",\n # \"marker/cross/line/width\": 1,\n # \"marker/cursor/text/font/size\": 8,\n # \"marker/cursor/text/font/family\": \"default\",\n # \"marker/cursor/text/font/bold\": False,\n # \"marker/cursor/text/font/italic\": False,\n # \"marker/cursor/text/textcolor\": \"#ff9393\",\n # \"marker/cursor/text/background_color\": \"#ffffff\",\n # \"marker/cursor/text/background_alpha\": 0.8,\n \"shape/drag/symbol/marker\": \"NoSymbol\",\n \"shape/mask/symbol/size\": 5,\n \"shape/mask/sel_symbol/size\": 8,\n # -----------------------------------------------------------------------------\n # Annotated shape style for annotations:\n \"shape/annotation/line/style\": \"SolidLine\",\n \"shape/annotation/line/color\": \"#ffff00\",\n \"shape/annotation/line/width\": 1,\n \"shape/annotation/fill/style\": \"SolidPattern\",\n \"shape/annotation/fill/color\": MAIN_BG_COLOR,\n \"shape/annotation/fill/alpha\": 0.1,\n \"shape/annotation/symbol/marker\": \"Rect\",\n \"shape/annotation/symbol/size\": 3,\n \"shape/annotation/symbol/edgecolor\": \"#ffff00\",\n \"shape/annotation/symbol/facecolor\": \"#ffff00\",\n \"shape/annotation/symbol/alpha\": 1.0,\n \"shape/annotation/sel_line/style\": \"SolidLine\",\n \"shape/annotation/sel_line/color\": \"#00ff00\",\n 
\"shape/annotation/sel_line/width\": 1,\n \"shape/annotation/sel_fill/style\": \"SolidPattern\",\n \"shape/annotation/sel_fill/color\": MAIN_BG_COLOR,\n \"shape/annotation/sel_fill/alpha\": 0.1,\n \"shape/annotation/sel_symbol/marker\": \"Rect\",\n \"shape/annotation/sel_symbol/size\": 9,\n \"shape/annotation/sel_symbol/edgecolor\": \"#00aa00\",\n \"shape/annotation/sel_symbol/facecolor\": \"#00ff00\",\n \"shape/annotation/sel_symbol/alpha\": 0.7,\n # -----------------------------------------------------------------------------\n # Annotated shape style for result shapes / signals:\n \"shape/result/s/line/style\": \"SolidLine\",\n \"shape/result/s/line/color\": MAIN_FG_COLOR,\n \"shape/result/s/line/width\": 1,\n \"shape/result/s/fill/style\": \"SolidPattern\",\n \"shape/result/s/fill/color\": MAIN_BG_COLOR,\n \"shape/result/s/fill/alpha\": 0.1,\n \"shape/result/s/symbol/marker\": \"XCross\",\n \"shape/result/s/symbol/size\": 7,\n \"shape/result/s/symbol/edgecolor\": MAIN_FG_COLOR,\n \"shape/result/s/symbol/facecolor\": MAIN_FG_COLOR,\n \"shape/result/s/symbol/alpha\": 1.0,\n \"shape/result/s/sel_line/style\": \"SolidLine\",\n \"shape/result/s/sel_line/color\": \"#00ff00\",\n \"shape/result/s/sel_line/width\": 1,\n \"shape/result/s/sel_fill/style\": \"SolidPattern\",\n \"shape/result/s/sel_fill/color\": MAIN_BG_COLOR,\n \"shape/result/s/sel_fill/alpha\": 0.1,\n \"shape/result/s/sel_symbol/marker\": \"Rect\",\n \"shape/result/s/sel_symbol/size\": 9,\n \"shape/result/s/sel_symbol/edgecolor\": \"#00aa00\",\n \"shape/result/s/sel_symbol/facecolor\": \"#00ff00\",\n \"shape/result/s/sel_symbol/alpha\": 0.7,\n # -----------------------------------------------------------------------------\n # Annotated shape style for result shapes / images:\n \"shape/result/i/line/style\": \"SolidLine\",\n \"shape/result/i/line/color\": \"#ffff00\",\n \"shape/result/i/line/width\": 1,\n \"shape/result/i/fill/style\": \"SolidPattern\",\n \"shape/result/i/fill/color\": MAIN_BG_COLOR,\n \"shape/result/i/fill/alpha\": 0.1,\n \"shape/result/i/symbol/marker\": \"Rect\",\n \"shape/result/i/symbol/size\": 3,\n \"shape/result/i/symbol/edgecolor\": \"#ffff00\",\n \"shape/result/i/symbol/facecolor\": \"#ffff00\",\n \"shape/result/i/symbol/alpha\": 1.0,\n \"shape/result/i/sel_line/style\": \"SolidLine\",\n \"shape/result/i/sel_line/color\": \"#00ff00\",\n \"shape/result/i/sel_line/width\": 1,\n \"shape/result/i/sel_fill/style\": \"SolidPattern\",\n \"shape/result/i/sel_fill/color\": MAIN_BG_COLOR,\n \"shape/result/i/sel_fill/alpha\": 0.1,\n \"shape/result/i/sel_symbol/marker\": \"Rect\",\n \"shape/result/i/sel_symbol/size\": 9,\n \"shape/result/i/sel_symbol/edgecolor\": \"#00aa00\",\n \"shape/result/i/sel_symbol/facecolor\": \"#00ff00\",\n \"shape/result/i/sel_symbol/alpha\": 0.7,\n # -----------------------------------------------------------------------------\n },\n}\ndef is_frozen(module_name: str) -> bool:\ndef get_mod_source_dir() -> str | None:\n def get_def_dict(cls, category: str) -> dict:\n def set_def_dict(cls, category: str, def_dict: dict) -> None:\ndef get_old_log_fname(fname):\ndef initialize():\ndef reset():\nclass MainSection(conf.Section, metaclass=conf.SectionMeta):\nclass ConsoleSection(conf.Section, metaclass=conf.SectionMeta):\nclass IOSection(conf.Section, metaclass=conf.SectionMeta):\nclass ProcSection(conf.Section, metaclass=conf.SectionMeta):\nclass ViewSection(conf.Section, metaclass=conf.SectionMeta):\nclass Conf(conf.Configuration, metaclass=conf.ConfMeta):" }, { "identifier": "ClipParam", 
"path": "cdl/core/computation/base.py", "snippet": "class ClipParam(gds.DataSet):\n \"\"\"Data clipping parameters\"\"\"\n\n value = gds.FloatItem(_(\"Clipping value\"))" }, { "identifier": "FFTParam", "path": "cdl/core/computation/base.py", "snippet": "class FFTParam(gds.DataSet):\n \"\"\"FFT parameters\"\"\"\n\n shift = gds.BoolItem(_(\"Shift\"), help=_(\"Shift zero frequency to center\"))" }, { "identifier": "GaussianParam", "path": "cdl/core/computation/base.py", "snippet": "class GaussianParam(gds.DataSet):\n \"\"\"Gaussian filter parameters\"\"\"\n\n sigma = gds.FloatItem(\"σ\", default=1.0)" }, { "identifier": "MovingAverageParam", "path": "cdl/core/computation/base.py", "snippet": "class MovingAverageParam(gds.DataSet):\n \"\"\"Moving average parameters\"\"\"\n\n n = gds.IntItem(_(\"Size of the moving window\"), default=3, min=1)" }, { "identifier": "MovingMedianParam", "path": "cdl/core/computation/base.py", "snippet": "class MovingMedianParam(gds.DataSet):\n \"\"\"Moving median parameters\"\"\"\n\n n = gds.IntItem(_(\"Size of the moving window\"), default=3, min=1, even=False)" }, { "identifier": "ThresholdParam", "path": "cdl/core/computation/base.py", "snippet": "class ThresholdParam(gds.DataSet):\n \"\"\"Threshold parameters\"\"\"\n\n value = gds.FloatItem(_(\"Threshold\"))" }, { "identifier": "SignalObj", "path": "cdl/core/model/signal.py", "snippet": "class SignalObj(gds.DataSet, base.BaseObj):\n \"\"\"Signal object\"\"\"\n\n PREFIX = \"s\"\n CONF_FMT = Conf.view.sig_format\n DEFAULT_FMT = \"g\"\n VALID_DTYPES = (np.float32, np.float64, np.complex128)\n\n uuid = gds.StringItem(\"UUID\").set_prop(\"display\", hide=True)\n\n _tabs = gds.BeginTabGroup(\"all\")\n\n _datag = gds.BeginGroup(_(\"Data and metadata\"))\n title = gds.StringItem(_(\"Signal title\"), default=_(\"Untitled\"))\n xydata = gds.FloatArrayItem(_(\"Data\"), transpose=True, minmax=\"rows\")\n metadata = gds.DictItem(_(\"Metadata\"), default={})\n _e_datag = gds.EndGroup(_(\"Data and metadata\"))\n\n _unitsg = gds.BeginGroup(_(\"Titles and units\"))\n title = gds.StringItem(_(\"Signal title\"), default=_(\"Untitled\"))\n _tabs_u = gds.BeginTabGroup(\"units\")\n _unitsx = gds.BeginGroup(_(\"X-axis\"))\n xlabel = gds.StringItem(_(\"Title\"), default=\"\")\n xunit = gds.StringItem(_(\"Unit\"), default=\"\")\n _e_unitsx = gds.EndGroup(_(\"X-axis\"))\n _unitsy = gds.BeginGroup(_(\"Y-axis\"))\n ylabel = gds.StringItem(_(\"Title\"), default=\"\")\n yunit = gds.StringItem(_(\"Unit\"), default=\"\")\n _e_unitsy = gds.EndGroup(_(\"Y-axis\"))\n _e_tabs_u = gds.EndTabGroup(\"units\")\n _e_unitsg = gds.EndGroup(_(\"Titles and units\"))\n\n _e_tabs = gds.EndTabGroup(\"all\")\n\n def __init__(self, title=None, comment=None, icon=\"\"):\n \"\"\"Constructor\n\n Args:\n title (str): title\n comment (str): comment\n icon (str): icon\n \"\"\"\n gds.DataSet.__init__(self, title, comment, icon)\n base.BaseObj.__init__(self)\n self.regenerate_uuid()\n\n def regenerate_uuid(self):\n \"\"\"Regenerate UUID\n\n This method is used to regenerate UUID after loading the object from a file.\n This is required to avoid UUID conflicts when loading objects from file\n without clearing the workspace first.\n \"\"\"\n self.uuid = str(uuid4())\n\n def copy(\n self, title: str | None = None, dtype: np.dtype | None = None\n ) -> SignalObj:\n \"\"\"Copy object.\n\n Args:\n title (str): title\n dtype (numpy.dtype): data type\n\n Returns:\n SignalObj: copied object\n \"\"\"\n title = self.title if title is None else title\n obj = 
SignalObj(title=title)\n obj.title = title\n if dtype not in (None, float, complex, np.complex128):\n raise RuntimeError(\"Signal data only supports float64/complex128 dtype\")\n obj.metadata = deepcopy(self.metadata)\n obj.xydata = np.array(self.xydata, copy=True, dtype=dtype)\n return obj\n\n def set_data_type(self, dtype: np.dtype) -> None: # pylint: disable=unused-argument\n \"\"\"Change data type.\n\n Args:\n dtype (numpy.dtype): data type\n \"\"\"\n raise RuntimeError(\"Setting data type is not support for signals\")\n\n def set_xydata(\n self,\n x: np.ndarray | list,\n y: np.ndarray | list,\n dx: np.ndarray | list | None = None,\n dy: np.ndarray | list | None = None,\n ) -> None:\n \"\"\"Set xy data\n\n Args:\n x (numpy.ndarray): x data\n y (numpy.ndarray): y data\n dx (numpy.ndarray): dx data (optional: error bars)\n dy (numpy.ndarray): dy data (optional: error bars)\n \"\"\"\n if x is not None:\n x = np.array(x)\n if y is not None:\n y = np.array(y)\n if dx is not None:\n dx = np.array(dx)\n if dy is not None:\n dy = np.array(dy)\n if dx is None and dy is None:\n self.xydata = np.vstack([x, y])\n else:\n if dx is None:\n dx = np.zeros_like(dy)\n if dy is None:\n dy = np.zeros_like(dx)\n self.xydata = np.vstack((x, y, dx, dy))\n\n def __get_x(self) -> np.ndarray | None:\n \"\"\"Get x data\"\"\"\n if self.xydata is not None:\n return self.xydata[0]\n return None\n\n def __set_x(self, data) -> None:\n \"\"\"Set x data\"\"\"\n self.xydata[0] = np.array(data)\n\n def __get_y(self) -> np.ndarray | None:\n \"\"\"Get y data\"\"\"\n if self.xydata is not None:\n return self.xydata[1]\n return None\n\n def __set_y(self, data) -> None:\n \"\"\"Set y data\"\"\"\n self.xydata[1] = np.array(data)\n\n def __get_dx(self) -> np.ndarray | None:\n \"\"\"Get dx data\"\"\"\n if self.xydata is not None and len(self.xydata) > 2:\n return self.xydata[2]\n return None\n\n def __set_dx(self, data) -> None:\n \"\"\"Set dx data\"\"\"\n if self.xydata is not None and len(self.xydata) > 2:\n self.xydata[2] = np.array(data)\n else:\n raise ValueError(\"dx data not available\")\n\n def __get_dy(self) -> np.ndarray | None:\n \"\"\"Get dy data\"\"\"\n if self.xydata is not None and len(self.xydata) > 3:\n return self.xydata[3]\n return None\n\n def __set_dy(self, data) -> None:\n \"\"\"Set dy data\"\"\"\n if self.xydata is not None and len(self.xydata) > 3:\n self.xydata[3] = np.array(data)\n else:\n raise ValueError(\"dy data not available\")\n\n x = property(__get_x, __set_x)\n y = data = property(__get_y, __set_y)\n dx = property(__get_dx, __set_dx)\n dy = property(__get_dy, __set_dy)\n\n def get_data(self, roi_index: int | None = None) -> np.ndarray:\n \"\"\"\n Return original data (if ROI is not defined or `roi_index` is None),\n or ROI data (if both ROI and `roi_index` are defined).\n\n Args:\n roi_index (int): ROI index\n\n Returns:\n numpy.ndarray: data\n \"\"\"\n if self.roi is None or roi_index is None:\n return self.x, self.y\n i1, i2 = self.roi[roi_index, :]\n return self.x[i1:i2], self.y[i1:i2]\n\n def update_plot_item_parameters(self, item: CurveItem) -> None:\n \"\"\"Update plot item parameters from object data/metadata\n\n Takes into account a subset of plot item parameters. Those parameters may\n have been overriden by object metadata entries or other object data. 
The goal\n is to update the plot item accordingly.\n\n This is *almost* the inverse operation of `update_metadata_from_plot_item`.\n\n Args:\n item: plot item\n \"\"\"\n update_dataset(item.param.line, self.metadata)\n update_dataset(item.param.symbol, self.metadata)\n super().update_plot_item_parameters(item)\n\n def update_metadata_from_plot_item(self, item: CurveItem) -> None:\n \"\"\"Update metadata from plot item.\n\n Takes into account a subset of plot item parameters. Those parameters may\n have been modified by the user through the plot item GUI. The goal is to\n update the metadata accordingly.\n\n This is *almost* the inverse operation of `update_plot_item_parameters`.\n\n Args:\n item: plot item\n \"\"\"\n super().update_metadata_from_plot_item(item)\n restore_dataset(item.param.line, self.metadata)\n restore_dataset(item.param.symbol, self.metadata)\n\n def make_item(self, update_from: CurveItem = None) -> CurveItem:\n \"\"\"Make plot item from data.\n\n Args:\n update_from (CurveItem): plot item to update from\n\n Returns:\n CurveItem: plot item\n \"\"\"\n if len(self.xydata) in (2, 3, 4):\n if len(self.xydata) == 2: # x, y signal\n x, y = self.xydata\n item = make.mcurve(x.real, y.real, label=self.title)\n elif len(self.xydata) == 3: # x, y, dy error bar signal\n x, y, dy = self.xydata\n item = make.merror(x.real, y.real, dy.real, label=self.title)\n elif len(self.xydata) == 4: # x, y, dx, dy error bar signal\n x, y, dx, dy = self.xydata\n item = make.merror(x.real, y.real, dx.real, dy.real, label=self.title)\n CurveStyles.apply_style(item.param)\n else:\n raise RuntimeError(\"data not supported\")\n if update_from is None:\n if execenv.demo_mode:\n item.param.line.width = 3\n self.update_plot_item_parameters(item)\n else:\n update_dataset(item.param, update_from.param)\n item.update_params()\n return item\n\n def update_item(self, item: CurveItem, data_changed: bool = True) -> None:\n \"\"\"Update plot item from data.\n\n Args:\n item (CurveItem): plot item\n data_changed (bool): if True, data has changed\n \"\"\"\n if data_changed:\n if len(self.xydata) == 2: # x, y signal\n x, y = self.xydata\n item.set_data(x.real, y.real)\n elif len(self.xydata) == 3: # x, y, dy error bar signal\n x, y, dy = self.xydata\n item.set_data(x.real, y.real, dy=dy.real)\n elif len(self.xydata) == 4: # x, y, dx, dy error bar signal\n x, y, dx, dy = self.xydata\n item.set_data(x.real, y.real, dx.real, dy.real)\n item.param.label = self.title\n self.update_plot_item_parameters(item)\n\n def roi_coords_to_indexes(self, coords: list) -> np.ndarray:\n \"\"\"Convert ROI coordinates to indexes.\n\n Args:\n coords (list): coordinates\n\n Returns:\n numpy.ndarray: indexes\n \"\"\"\n indexes = np.array(coords, int)\n for row in range(indexes.shape[0]):\n for col in range(indexes.shape[1]):\n x0 = coords[row][col]\n indexes[row, col] = np.abs(self.x - x0).argmin()\n return indexes\n\n def get_roi_param(self, title: str, *defaults) -> gds.DataSet:\n \"\"\"Return ROI parameters dataset.\n\n Args:\n title (str): title\n *defaults: default values\n \"\"\"\n imax = len(self.x) - 1\n i0, i1 = defaults\n param = ROIParam(title)\n param.col1 = i0\n param.col2 = i1\n param.set_global_prop(\"data\", min=-1, max=imax)\n return param\n\n @staticmethod\n def params_to_roidata(params: gds.DataSetGroup) -> np.ndarray:\n \"\"\"Convert ROI dataset group to ROI array data.\n\n Args:\n params (DataSetGroup): ROI dataset group\n\n Returns:\n numpy.ndarray: ROI array data\n \"\"\"\n roilist = []\n for roiparam in 
params.datasets:\n roiparam: ROIParam\n roilist.append([roiparam.col1, roiparam.col2])\n if len(roilist) == 0:\n return None\n return np.array(roilist, int)\n\n def new_roi_item(self, fmt: str, lbl: bool, editable: bool):\n \"\"\"Return a new ROI item from scratch\n\n Args:\n fmt (str): format string\n lbl (bool): if True, add label\n editable (bool): if True, ROI is editable\n \"\"\"\n coords = self.x.min(), self.x.max()\n return base.make_roi_item(\n lambda x, y, _title: make.range(x, y),\n coords,\n \"ROI\",\n fmt,\n lbl,\n editable,\n option=\"shape/drag\",\n )\n\n def iterate_roi_items(self, fmt: str, lbl: bool, editable: bool = True):\n \"\"\"Make plot item representing a Region of Interest.\n\n Args:\n fmt (str): format string\n lbl (bool): if True, add label\n editable (bool): if True, ROI is editable\n\n Yields:\n PlotItem: plot item\n \"\"\"\n if self.roi is not None:\n for index, coords in enumerate(self.x[self.roi]):\n yield base.make_roi_item(\n lambda x, y, _title: make.range(x, y),\n coords,\n f\"ROI{index:02d}\",\n fmt,\n lbl,\n editable,\n option=\"shape/drag\",\n )\n\n def add_label_with_title(self, title: str | None = None) -> None:\n \"\"\"Add label with title annotation\n\n Args:\n title (str): title (if None, use signal title)\n \"\"\"\n title = self.title if title is None else title\n if title:\n label = make.label(title, \"TL\", (0, 0), \"TL\")\n self.add_annotations_from_items([label])" } ]
import guidata.dataset as gds import numpy as np import scipy.integrate as spt import scipy.ndimage as spi import scipy.optimize as spo import scipy.signal as sps from cdl.algorithms import fit from cdl.algorithms.signal import ( derivative, interpolate, moving_average, normalize, peak_indexes, xpeak, xy_fft, xy_ifft, ) from cdl.config import _ from cdl.core.computation.base import ( ClipParam, FFTParam, GaussianParam, MovingAverageParam, MovingMedianParam, ThresholdParam, ) from cdl.core.model.signal import SignalObj
10,905
class XYCalibrateParam(gds.DataSet): """Signal calibration parameters""" axes = (("x", _("X-axis")), ("y", _("Y-axis"))) axis = gds.ChoiceItem(_("Calibrate"), axes, default="y") a = gds.FloatItem("a", default=1.0) b = gds.FloatItem("b", default=0.0) def compute_calibration(src: SignalObj, p: XYCalibrateParam) -> SignalObj: """Compute linear calibration Args: src (SignalObj): source signal p (XYCalibrateParam): parameters Returns: SignalObj: result signal object """ dst = dst_11(src, "calibration", f"{p.axis}={p.a}*{p.axis}+{p.b}") x, y = src.get_data() if p.axis == "x": dst.set_xydata(p.a * x + p.b, y) else: dst.set_xydata(x, p.a * y + p.b) return dst def compute_threshold(src: SignalObj, p: ThresholdParam) -> SignalObj: """Compute threshold clipping Args: src (SignalObj): source signal p (ThresholdParam): parameters Returns: SignalObj: result signal object """ dst = dst_11(src, "threshold", f"min={p.value}") x, y = src.get_data() dst.set_xydata(x, np.clip(y, p.value, y.max())) return dst def compute_clip(src: SignalObj, p: ClipParam) -> SignalObj: """Compute maximum data clipping Args: src (SignalObj): source signal p (ClipParam): parameters Returns: SignalObj: result signal object """ dst = dst_11(src, "clip", f"max={p.value}") x, y = src.get_data() dst.set_xydata(x, np.clip(y, y.min(), p.value)) return dst def compute_gaussian_filter(src: SignalObj, p: GaussianParam) -> SignalObj: """Compute gaussian filter Args: src (SignalObj): source signal p (GaussianParam): parameters Returns: SignalObj: result signal object """ dst = dst_11(src, "gaussian_filter", f"σ={p.sigma:.3f}") x, y = src.get_data() dst.set_xydata(x, spi.gaussian_filter1d(y, p.sigma)) return dst def compute_moving_average(src: SignalObj, p: MovingAverageParam) -> SignalObj: """Compute moving average Args: src (SignalObj): source signal p (MovingAverageParam): parameters Returns: SignalObj: result signal object """ dst = dst_11(src, "moving_average", f"n={p.n:d}") x, y = src.get_data() dst.set_xydata(x, moving_average(y, p.n)) return dst def compute_moving_median(src: SignalObj, p: MovingMedianParam) -> SignalObj: """Compute moving median Args: src (SignalObj): source signal p (MovingMedianParam): parameters Returns: SignalObj: result signal object """ dst = dst_11(src, "moving_median", f"n={p.n:d}") x, y = src.get_data() dst.set_xydata(x, sps.medfilt(y, kernel_size=p.n)) return dst def compute_wiener(src: SignalObj) -> SignalObj: """Compute Wiener filter Args: src (SignalObj): source signal Returns: SignalObj: result signal object """ dst = dst_11(src, "wiener") x, y = src.get_data() dst.set_xydata(x, sps.wiener(y)) return dst def compute_fft(src: SignalObj, p: FFTParam) -> SignalObj: """Compute FFT Args: src (SignalObj): source signal p (FFTParam): parameters Returns: SignalObj: result signal object """ dst = dst_11(src, "fft") x, y = src.get_data()
# -*- coding: utf-8 -*- # # Licensed under the terms of the BSD 3-Clause # (see cdl/LICENSE for details) """ .. Signal computation objects (see parent package :mod:`cdl.core.computation`) """ # pylint: disable=invalid-name # Allows short reference names like x, y, ... # Note: # ---- # All dataset classes must also be imported in the cdl.core.computation.param module. from __future__ import annotations VALID_DTYPES_STRLIST = SignalObj.get_valid_dtypenames() def dst_11(src: SignalObj, name: str, suffix: str | None = None) -> SignalObj: """Create result signal object for compute_11 function Args: src (SignalObj): source signal name (str): name of the function Returns: SignalObj: result signal object """ dst = src.copy(title=f"{name}({src.short_id})") if suffix is not None: dst.title += "|" + suffix return dst def dst_n1n(src1: SignalObj, src2: SignalObj, name: str, suffix: str | None = None): """Create result signal object for compute_n1n function Args: src1 (SignalObj): source signal 1 src2 (SignalObj): source signal 2 name (str): name of the function Returns: SignalObj: result signal object """ dst = src1.copy(title=f"{name}({src1.short_id}, {src2.short_id})") if suffix is not None: dst.title += "|" + suffix return dst # -------- compute_n1 functions -------------------------------------------------------- # Functions with N input signals and 1 output signal # -------------------------------------------------------------------------------------- # Those functions are perfoming a computation on N input signals and return a single # output signal. If we were only executing these functions locally, we would not need # to define them here, but since we are using the multiprocessing module, we need to # define them here so that they can be pickled and sent to the worker processes. # Also, we need to systematically return the output signal object, even if it is already # modified in place, because the multiprocessing module will not be able to retrieve # the modified object from the worker processes. 
def compute_add(dst: SignalObj, src: SignalObj) -> SignalObj: """Add signal to result signal Args: dst (SignalObj): destination signal src (SignalObj): source signal """ dst.y += np.array(src.y, dtype=dst.y.dtype) if dst.dy is not None: dst.dy = np.sqrt(dst.dy**2 + src.dy**2) return dst def compute_product(dst: SignalObj, src: SignalObj) -> SignalObj: """Multiply signal to result signal Args: dst (SignalObj): destination signal src (SignalObj): source signal """ dst.y *= np.array(src.y, dtype=dst.y.dtype) if dst.dy is not None: dst.dy = dst.y * np.sqrt((dst.dy / dst.y) ** 2 + (src.dy / src.y) ** 2) return dst # -------- compute_n1n functions ------------------------------------------------------- # Functions with N input images + 1 input image and N output images # -------------------------------------------------------------------------------------- def compute_difference(src1: SignalObj, src2: SignalObj) -> SignalObj: """Compute difference between two signals Args: src1 (SignalObj): source signal 1 src2 (SignalObj): source signal 2 Returns: SignalObj: result signal object """ dst = dst_n1n(src1, src2, "difference") dst.y = src1.y - src2.y if dst.dy is not None: dst.dy = np.sqrt(src1.dy**2 + src2.dy**2) return dst def compute_quadratic_difference(src1: SignalObj, src2: SignalObj) -> SignalObj: """Compute quadratic difference between two signals Args: src1 (SignalObj): source signal 1 src2 (SignalObj): source signal 2 Returns: SignalObj: result signal object """ dst = dst_n1n(src1, src2, "quadratic_difference") x1, y1 = src1.get_data() _x2, y2 = src2.get_data() dst.set_xydata(x1, (y1 - np.array(y2, dtype=y1.dtype)) / np.sqrt(2.0)) if np.issubdtype(dst.data.dtype, np.unsignedinteger): dst.data[src1.data < src2.data] = 0 if dst.dy is not None: dst.dy = np.sqrt(src1.dy**2 + src2.dy**2) return dst def compute_division(src1: SignalObj, src2: SignalObj) -> SignalObj: """Compute division between two signals Args: src1 (SignalObj): source signal 1 src2 (SignalObj): source signal 2 Returns: SignalObj: result signal object """ dst = dst_n1n(src1, src2, "division") x1, y1 = src1.get_data() _x2, y2 = src2.get_data() dst.set_xydata(x1, y1 / np.array(y2, dtype=y1.dtype)) return dst # -------- compute_11 functions -------------------------------------------------------- # Functions with 1 input image and 1 output image # -------------------------------------------------------------------------------------- def extract_multiple_roi(src: SignalObj, group: gds.DataSetGroup) -> SignalObj: """Extract multiple regions of interest from data Args: src (SignalObj): source signal group (gds.DataSetGroup): group of parameters Returns: SignalObj: signal with multiple regions of interest """ suffix = None if len(group.datasets) == 1: p = group.datasets[0] suffix = f"indexes={p.col1:d}:{p.col2:d}" dst = dst_11(src, "extract_multiple_roi", suffix) x, y = src.get_data() xout, yout = np.ones_like(x) * np.nan, np.ones_like(y) * np.nan for p in group.datasets: slice0 = slice(p.col1, p.col2 + 1) xout[slice0], yout[slice0] = x[slice0], y[slice0] nans = np.isnan(xout) | np.isnan(yout) dst.set_xydata(xout[~nans], yout[~nans]) # TODO: [P2] Instead of removing geometric shapes, apply roi extract dst.remove_all_shapes() return dst def extract_single_roi(src: SignalObj, p: gds.DataSet) -> SignalObj: """Extract single region of interest from data Args: src (SignalObj): source signal p (gds.DataSet): parameters Returns: SignalObj: signal with single region of interest """ dst = dst_11(src, "extract_single_roi", 
f"indexes={p.col1:d}:{p.col2:d}") x, y = src.get_data() dst.set_xydata(x[p.col1 : p.col2 + 1], y[p.col1 : p.col2 + 1]) # TODO: [P2] Instead of removing geometric shapes, apply roi extract dst.remove_all_shapes() return dst def compute_swap_axes(src: SignalObj) -> SignalObj: """Swap axes Args: src (SignalObj): source signal Returns: SignalObj: result signal object """ dst = dst_11(src, "swap_axes") x, y = src.get_data() dst.set_xydata(y, x) return dst def compute_abs(src: SignalObj) -> SignalObj: """Compute absolute value Args: src (SignalObj): source signal Returns: SignalObj: result signal object """ dst = dst_11(src, "abs") x, y = src.get_data() dst.set_xydata(x, np.abs(y)) return dst def compute_re(src: SignalObj) -> SignalObj: """Compute real part Args: src (SignalObj): source signal Returns: SignalObj: result signal object """ dst = dst_11(src, "re") x, y = src.get_data() dst.set_xydata(x, np.real(y)) return dst def compute_im(src: SignalObj) -> SignalObj: """Compute imaginary part Args: src (SignalObj): source signal Returns: SignalObj: result signal object """ dst = dst_11(src, "im") x, y = src.get_data() dst.set_xydata(x, np.imag(y)) return dst class DataTypeSParam(gds.DataSet): """Convert signal data type parameters""" dtype_str = gds.ChoiceItem( _("Destination data type"), list(zip(VALID_DTYPES_STRLIST, VALID_DTYPES_STRLIST)), help=_("Output image data type."), ) def compute_astype(src: SignalObj, p: DataTypeSParam) -> SignalObj: """Convert data type Args: src: source signal p: parameters Returns: Result signal object """ dst = dst_11(src, "astype", f"dtype={p.dtype_str}") dst.xydata = src.xydata.astype(p.dtype_str) return dst def compute_log10(src: SignalObj) -> SignalObj: """Compute Log10 Args: src (SignalObj): source signal Returns: SignalObj: result signal object """ dst = dst_11(src, "log10") x, y = src.get_data() dst.set_xydata(x, np.log10(y)) return dst class PeakDetectionParam(gds.DataSet): """Peak detection parameters""" threshold = gds.IntItem( _("Threshold"), default=30, min=0, max=100, slider=True, unit="%" ) min_dist = gds.IntItem(_("Minimum distance"), default=1, min=1, unit="points") def compute_peak_detection(src: SignalObj, p: PeakDetectionParam) -> SignalObj: """Peak detection Args: src (SignalObj): source signal p (PeakDetectionParam): parameters Returns: SignalObj: result signal object """ dst = dst_11( src, "peak_detection", f"threshold={p.threshold}%, min_dist={p.min_dist}pts" ) x, y = src.get_data() indexes = peak_indexes(y, thres=p.threshold * 0.01, min_dist=p.min_dist) dst.set_xydata(x[indexes], y[indexes]) dst.metadata["curvestyle"] = "Sticks" return dst class NormalizeYParam(gds.DataSet): """Normalize parameters""" methods = ( (_("maximum"), "maximum"), (_("amplitude"), "amplitude"), (_("sum"), "sum"), (_("energy"), "energy"), ) method = gds.ChoiceItem(_("Normalize with respect to"), methods) def compute_normalize(src: SignalObj, p: NormalizeYParam) -> SignalObj: """Normalize data Args: src (SignalObj): source signal p (NormalizeYParam): parameters Returns: SignalObj: result signal object """ dst = dst_11(src, "normalize", f"ref={p.method}") x, y = src.get_data() dst.set_xydata(x, normalize(y, p.method)) return dst def compute_derivative(src: SignalObj) -> SignalObj: """Compute derivative Args: src (SignalObj): source signal Returns: SignalObj: result signal object """ dst = dst_11(src, "derivative") x, y = src.get_data() dst.set_xydata(x, derivative(x, y)) return dst def compute_integral(src: SignalObj) -> SignalObj: """Compute integral Args: src 
(SignalObj): source signal Returns: SignalObj: result signal object """ dst = dst_11(src, "integral") x, y = src.get_data() dst.set_xydata(x, spt.cumtrapz(y, x, initial=0.0)) return dst class XYCalibrateParam(gds.DataSet): """Signal calibration parameters""" axes = (("x", _("X-axis")), ("y", _("Y-axis"))) axis = gds.ChoiceItem(_("Calibrate"), axes, default="y") a = gds.FloatItem("a", default=1.0) b = gds.FloatItem("b", default=0.0) def compute_calibration(src: SignalObj, p: XYCalibrateParam) -> SignalObj: """Compute linear calibration Args: src (SignalObj): source signal p (XYCalibrateParam): parameters Returns: SignalObj: result signal object """ dst = dst_11(src, "calibration", f"{p.axis}={p.a}*{p.axis}+{p.b}") x, y = src.get_data() if p.axis == "x": dst.set_xydata(p.a * x + p.b, y) else: dst.set_xydata(x, p.a * y + p.b) return dst def compute_threshold(src: SignalObj, p: ThresholdParam) -> SignalObj: """Compute threshold clipping Args: src (SignalObj): source signal p (ThresholdParam): parameters Returns: SignalObj: result signal object """ dst = dst_11(src, "threshold", f"min={p.value}") x, y = src.get_data() dst.set_xydata(x, np.clip(y, p.value, y.max())) return dst def compute_clip(src: SignalObj, p: ClipParam) -> SignalObj: """Compute maximum data clipping Args: src (SignalObj): source signal p (ClipParam): parameters Returns: SignalObj: result signal object """ dst = dst_11(src, "clip", f"max={p.value}") x, y = src.get_data() dst.set_xydata(x, np.clip(y, y.min(), p.value)) return dst def compute_gaussian_filter(src: SignalObj, p: GaussianParam) -> SignalObj: """Compute gaussian filter Args: src (SignalObj): source signal p (GaussianParam): parameters Returns: SignalObj: result signal object """ dst = dst_11(src, "gaussian_filter", f"σ={p.sigma:.3f}") x, y = src.get_data() dst.set_xydata(x, spi.gaussian_filter1d(y, p.sigma)) return dst def compute_moving_average(src: SignalObj, p: MovingAverageParam) -> SignalObj: """Compute moving average Args: src (SignalObj): source signal p (MovingAverageParam): parameters Returns: SignalObj: result signal object """ dst = dst_11(src, "moving_average", f"n={p.n:d}") x, y = src.get_data() dst.set_xydata(x, moving_average(y, p.n)) return dst def compute_moving_median(src: SignalObj, p: MovingMedianParam) -> SignalObj: """Compute moving median Args: src (SignalObj): source signal p (MovingMedianParam): parameters Returns: SignalObj: result signal object """ dst = dst_11(src, "moving_median", f"n={p.n:d}") x, y = src.get_data() dst.set_xydata(x, sps.medfilt(y, kernel_size=p.n)) return dst def compute_wiener(src: SignalObj) -> SignalObj: """Compute Wiener filter Args: src (SignalObj): source signal Returns: SignalObj: result signal object """ dst = dst_11(src, "wiener") x, y = src.get_data() dst.set_xydata(x, sps.wiener(y)) return dst def compute_fft(src: SignalObj, p: FFTParam) -> SignalObj: """Compute FFT Args: src (SignalObj): source signal p (FFTParam): parameters Returns: SignalObj: result signal object """ dst = dst_11(src, "fft") x, y = src.get_data()
dst.set_xydata(*xy_fft(x, y, shift=p.shift))
7
2023-11-09 16:56:03+00:00
16k
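A minimal sketch of how one of these records could be consumed, assuming `gold_snippet_index` indexes into the record's `context` list and `next_line` is the expected continuation of `cropped_code`; the helper names and the dict-style access are illustrative only and are not part of the dataset.

# Illustrative only: assembling a next-line completion example from a record
# shaped like the ones in this dump (field names taken from the records here;
# everything else is an assumption).
def build_prompt(record: dict) -> str:
    # The text a model would be asked to complete: the file's imports followed
    # by the cropped file body.
    return record["import_statement"] + "\n" + record["cropped_code"]


def gold_context_snippet(record: dict) -> dict:
    # Assumed: gold_snippet_index selects the relevant retrieved snippet from
    # the context list.
    return record["context"][record["gold_snippet_index"]]


def completion_target(record: dict) -> str:
    # The single line the model is expected to produce next.
    return record["next_line"]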
lalalamdbf/PLSE_IDRR
src/prompt-tuning/prompt/pipeline_base.py
[ { "identifier": "InputExample", "path": "src/prompt-tuning/prompt/data_utils.py", "snippet": "class InputExample(object):\n \"\"\"A raw input example consisting of segments of text,\n a label for classification task or a target sequence of generation task.\n Other desired information can be passed via meta.\n\n Args:\n guid (:obj:`str`, optional): A unique identifier of the example.\n text_a (:obj:`str`, optional): The placeholder for sequence of text.\n text_b (:obj:`str`, optional): A secend sequence of text, which is not always necessary.\n label (:obj:`int`, optional): The label id of the example in classification task.\n tgt_text (:obj:`Union[str,List[str]]`, optional): The target sequence of the example in a generation task..\n meta (:obj:`Dict`, optional): An optional dictionary to store arbitrary extra information for the example.\n \"\"\"\n\n def __init__(self,\n guid = None,\n text_a = \"\",\n text_b = \"\",\n label = None,\n meta: Optional[Dict] = None,\n tgt_text: Optional[Union[str,List[str]]] = None\n ):\n\n self.guid = guid\n self.text_a = text_a\n self.text_b = text_b\n self.label = label\n self.meta = meta if meta else {}\n self.tgt_text = tgt_text\n\n def __repr__(self):\n return str(self.to_json_string())\n\n def to_dict(self):\n r\"\"\"Serialize this instance to a Python dictionary.\"\"\"\n output = copy.deepcopy(self.__dict__)\n return output\n\n def to_json_string(self):\n r\"\"\"Serialize this instance to a JSON string.\"\"\"\n return json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"\n\n def keys(self, keep_none=False):\n return [key for key in self.__dict__.keys() if getattr(self, key) is not None]\n\n @staticmethod\n def load_examples(path: str) -> List['InputExample']:\n \"\"\"Load a set of input examples from a file\"\"\"\n with open(path, 'rb') as fh:\n return pickle.load(fh)\n\n @staticmethod\n def save_examples(examples: List['InputExample'], path: str) -> None:\n \"\"\"Save a set of input examples to a file\"\"\"\n with open(path, 'wb') as fh:\n pickle.dump(examples, fh)" }, { "identifier": "InputFeatures", "path": "src/prompt-tuning/prompt/data_utils.py", "snippet": "class InputFeatures(dict):\n \"\"\"\n The class for input to the PLM and Prompts. To make users explicitly know the available keys,\n we define a dict with a set of predefined possible keys. The default value to any key is None.\n When use it as a dict, all the keys whose values are None are invisible.\n\n This class support most of the dict's operation (See Examples). It can also be consumed by\n pytorch's default_collate in DataLoader.\n Also a :py:meth:`to_tensor()` method is build to convert the values into torch.Tensor for torch's input.\n\n Examples:\n\n .. 
code-block:: python\n\n in_feat = InputFeatures(**{'input_ids':[1,4,5], 'soft_token_ids': [3,4,5]}) # init from dict\n print(in_feat.keys()) # ['input_ids, 'soft_token_ids']\n in_feat['label'] = 3 # can assign value like normal dict\n print(in_feat.keys()) # ['input_ids','label', 'soft_token_ids'] (Note that it's also ordered)\n print(in_feat['label']) # 3\n in_feat['alice'] = 0 # KeyError: Key alice not in predefined set of keys\n in_feat.values() # [[1,4,5], 3, [3,4,5]] (Note that it's also ordered)\n [in_feat[key] for key in in_feat] # [[1,4,5], 3, [3,4,5]]\n new_dict= {**in_feat, 'new_key':2} # new_dict is {'input_ids': [1, 4, 5], 'label': 3, 'soft_token_ids': [3, 4, 5], 'new_key': 2}\n\n Args:\n input_ids: Indices of input sequence tokens in the vocabulary.\n attention_mask: Mask to avoid performing attention on padding token indices.\n Mask values selected in ``[0, 1]``: Usually ``1`` for tokens that are NOT MASKED, ``0`` for MASKED (padded)\n tokens.\n token_type_ids: (Optional) Segment token indices to indicate first and second\n portions of the inputs. Only some models use them.\n label: (Optional) Label corresponding to the input. Int for classification problems,\n float for regression problems.\n \"\"\"\n tensorable_keys = ['input_ids', 'inputs_embeds', 'attention_mask', 'token_type_ids', 'label',\n 'decoder_input_ids', 'decoder_inputs_embeds', 'soft_token_ids',\n 'past_key_values', 'loss_ids','conns_index']\n all_keys = ['input_ids', 'inputs_embeds', 'attention_mask', 'token_type_ids', 'label',\n 'decoder_input_ids', 'decoder_inputs_embeds', 'soft_token_ids',\n 'past_key_values', 'loss_ids','guid', 'tgt_text', 'encoded_tgt_text', 'input_ids_len','conns_index']\n non_tensorable_keys = []\n\n def __init__(self,\n input_ids: Optional[Union[List, torch.Tensor]] = None,\n inputs_embeds: Optional[torch.Tensor] = None,\n attention_mask: Optional[Union[List[int], torch.Tensor]] = None,\n token_type_ids: Optional[Union[List[int], torch.Tensor]] = None,\n label: Optional[Union[int, torch.Tensor]] = None,\n decoder_input_ids: Optional[Union[List, torch.Tensor]] = None,\n decoder_inputs_embeds: Optional[torch.Tensor] = None,\n soft_token_ids: Optional[Union[List, torch.Tensor]] = None,\n past_key_values: Optional[torch.Tensor] = None, # for prefix_tuning\n loss_ids: Optional[Union[List, torch.Tensor]] = None,\n guid: Optional[str] = None,\n tgt_text: Optional[str] = None,\n use_cache: Optional[bool] = None,\n encoded_tgt_text: Optional[str] = None,\n input_ids_len: Optional[int] = None,\n conns_index = None,\n **kwargs):\n\n self.input_ids = input_ids\n self.inputs_embeds = inputs_embeds\n self.attention_mask = attention_mask\n self.token_type_ids = token_type_ids\n self.label = label\n self.decoder_input_ids = decoder_input_ids\n self.decoder_inputs_embeds = decoder_inputs_embeds\n self.soft_token_ids = soft_token_ids\n self.past_key_values = past_key_values\n self.loss_ids = loss_ids\n self.guid = guid\n self.tgt_text = tgt_text\n self.encoded_tgt_text = encoded_tgt_text\n self.use_cache = use_cache\n self.input_ids_len = input_ids_len\n self.conns_index = conns_index\n\n for k in kwargs.keys():\n setattr(self, k, kwargs[k])\n\n @classmethod\n def add_tensorable_keys(cls, *args):\n cls.tensorable_keys.extend(args)\n\n @classmethod\n def add_not_tensorable_keys(cls, *args):\n cls.not_tensorable_keys.extend(args)\n\n @classmethod\n def add_keys(cls, *args):\n cls.all_keys.extend(args)\n\n def __repr__(self):\n return str(self.to_json_string())\n\n def __len__(self):\n return 
len(self.keys())\n\n def to_tensor(self, device: str = 'cuda'):\n \"\"\"inplace operation, convert all tensorable features into :obj:`torch.tensor`\"\"\"\n for key in self.tensorable_keys:\n value = getattr(self, key)\n if value is not None:\n setattr(self, key, torch.tensor(value))\n return self\n\n def to(self, device: str = \"cuda:0\"):\n r\"\"\"move the tensor keys to runtime device, such as gpu:0\n \"\"\"\n for key in self.tensorable_keys:\n value = getattr(self, key)\n if value is not None:\n setattr(self, key, value.to(device))\n return self\n\n def cuda(self, device: str = \"cuda:0\"):\n r\"\"\"mimic the tensor behavior\n \"\"\"\n return self.to(device)\n\n def to_json_string(self, keep_none=False):\n \"\"\"Serializes this instance to a JSON string.\"\"\"\n data = {}\n for key in self.all_keys:\n value = getattr(self, key)\n if isinstance(value, torch.Tensor):\n data[key] = value.detach().cpu().tolist()\n elif value is None and keep_none:\n data[key] = None\n else:\n data[key] = value\n return json.dumps(data) + \"\\n\"\n\n def keys(self, keep_none=False) -> List[str]:\n \"\"\"get all keys of the InputFeatures\n\n Args:\n keep_none (:obj:`bool`, optional): whether to keep the predefined keys whose value is none. Defaults to False.\n\n Returns:\n :obj:`List[str]`: keys of the InputFeatures\n \"\"\"\n if keep_none:\n return self.all_keys\n else:\n return [key for key in self.all_keys if getattr(self, key) is not None]\n\n def to_dict(self, keep_none=False) -> Dict[str, Any]:\n \"\"\"get the dict of mapping from keys to values of the InputFeatures\n\n Args:\n keep_none (:obj:`bool`, optional): whether to keep the predefined keys whose value is none. Defaults to False.\n\n Returns:\n :obj:`Dict[str, Any]`: dict of mapping from keys to values of the InputFeatures\n \"\"\"\n data = {}\n for key in self.all_keys:\n value = getattr(self, key)\n if value is not None:\n data[key] = value\n elif value is None and keep_none:\n data[key] = None\n return data\n\n def __getitem__(self, key):\n return getattr(self, key)\n\n def __iter__(self):\n return iter(self.keys())\n\n def __setitem__(self, key, item):\n if key not in self.all_keys:\n raise KeyError(\"Key {} not in predefined set of keys\".format(key))\n setattr(self, key, item)\n\n def values(self, keep_none=False) -> List[Any]:\n \"\"\"get the values with respect to the keys of the InputFeatures\n\n Args:\n keep_none (:obj:`bool`, optional): whether to keep the predefined keys whose value is none. Defaults to False.\n\n Returns:\n :obj:`List[Any]`: the values with respect to the keys of the InputFeatures\n \"\"\"\n return [getattr(self, key) for key in self.keys(keep_none=keep_none)]\n\n def __contains__(self, key, keep_none=False):\n return key in self.keys(keep_none)\n\n def items(self,):\n \"\"\"get the (key, value) pairs of the InputFeatures\n\n Args:\n keep_none (:obj:`bool`, optional): whether to keep the predefined keys whose value is none. 
Defaults to False.\n\n Returns:\n :obj:`List[Any]`: the (key, value) pairs of the InputFeatures\n \"\"\"\n return [(key, self.__getitem__(key)) for key in self.keys()]\n\n @staticmethod\n def collate_fct(batch: List):\n r'''\n This function is used to collate the input_features.\n\n Args:\n batch (:obj:`List[Union[Dict, InputFeatures]]`): A batch of the current data.\n\n Returns:\n :obj:`InputFeatures`: Return the :py:class:`~openprompt.data_utils.data_utils.InputFeatures of the current batch of data.\n '''\n\n\n elem = batch[0]\n return_dict = {}\n for key in elem:\n if key == \"encoded_tgt_text\":\n return_dict[key] = [d[key] for d in batch]\n else:\n try:\n return_dict[key] = default_collate([d[key] for d in batch])\n except:\n print(f\"key{key}\\n d {[batch[i][key] for i in range(len(batch))]} \")\n\n return InputFeatures(**return_dict)" }, { "identifier": "TokenizerWrapper", "path": "src/prompt-tuning/prompt/utils.py", "snippet": "class TokenizerWrapper:\n def __init__(self,\n max_seq_length: int,\n tokenizer: PreTrainedTokenizer,\n # truncate_method: Optional[str] = 'tail',\n create_token_type_ids: Optional[str] = False,\n segment_emb: Optional[str] = False,\n **kwargs):\n self.max_seq_length = max_seq_length\n\n self.tokenizer = tokenizer\n self.truncate_fct = self.truncate_from_tail\n\n self.create_token_type_ids = create_token_type_ids\n self.segment_emb = segment_emb\n\n self.template_mask_token = '<mask>'\n # self.template_eos_token = '<eos>'\n # self.template_bos_token = '<bos>'\n self.template_sep_token = '<sep>'\n self.template_cls_token = '<cls>'\n self.template_pad_token = '<pad>'\n\n from transformers import logging\n verbosity_before = logging.get_verbosity()\n logging.set_verbosity(logging.CRITICAL) # TODO solve this in a more elegant way\n self.mask_token_map = {self.template_mask_token: self.tokenizer.mask_token if hasattr(self.tokenizer, 'mask_token') else ''}\n # self.eos_token_map = {self.template_eos_token: self.tokenizer.eos_token if hasattr(self.tokenizer, 'eos_token') else ''}\n # self.bos_token_map = {self.template_bos_token: self.tokenizer.bos_token if hasattr(self.tokenizer, 'bos_token') else ''}\n self.sep_token_map = {self.template_sep_token: self.tokenizer.sep_token if hasattr(self.tokenizer, 'sep_token') else ''}\n self.cls_token_map = {self.template_cls_token: self.tokenizer.cls_token if hasattr(self.tokenizer, 'cls_token') else ''}\n self.pad_token_map = {self.template_pad_token: self.tokenizer.pad_token if hasattr(self.tokenizer, 'pad_token') else ''}\n logging.set_verbosity(verbosity_before)\n\n self.num_truncated_sentences = 0\n self.total_passed_sentences = 0\n\n @property\n def truncate_rate(self,):\n r\"\"\"Using this function, one can easily identify how many sentence has be truncated, thus help the user to choose a better thresthold for chunking.\n \"\"\"\n if self.total_passed_sentences==0:\n return None\n else:\n return self.num_truncated_sentences/self.total_passed_sentences\n\n @property\n def special_tokens_maps(self,) -> Dict:\n r\"\"\"This need to be specified in specific language model\n \"\"\"\n if not hasattr(self, \"_special_tokens_map\"):\n _special_tokens_map = {}\n for attrname in self.__dict__.keys():\n if attrname.endswith('_token_map'):\n _special_tokens_map.update(getattr(self, attrname))\n return _special_tokens_map\n\n def tokenize_with_mask(self,\n wrapped_example: List[Dict],\n ) -> InputFeatures:\n raise NotImplementedError\n\n def tokenize_without_mask(self,\n wrapped_example: List[Dict],\n ) -> InputFeatures:\n raise 
NotImplementedError\n\n\n @staticmethod\n def truncate_from_tail(input_dict: Dict,\n num_tokens_to_truncate: int=0) -> Dict:\n r\"\"\"truncate the inputs from the rear\n \"\"\"\n truncated_example = defaultdict(list)\n shortenable_ids = input_dict['shortenable_ids']\n for key in input_dict:\n parts = input_dict[key]\n to_trunc = num_tokens_to_truncate\n for i, part in enumerate(parts[::-1]):\n if len(part) == 0: # to prevent some part are empty after tokenization\n continue\n if shortenable_ids[-1-i][0]==0: # ==0 means the part is not shortenable\n continue\n parts[-1-i] = part[:-to_trunc] if to_trunc<len(part) else []\n to_trunc -= len(part)\n if to_trunc <= 0:\n break\n truncated_example[key] = parts\n return truncated_example\n\n\n @staticmethod\n def concate_parts(input_dict: Dict) -> Dict:\n for key in input_dict:\n input_dict[key] = list(itertools.chain(*input_dict[key]))\n return input_dict\n\n @staticmethod\n def padding(input_dict: Dict,\n max_len: int, pad_id_for_inputs: int=0, pad_id_for_others: int=0) -> None:\n for key, value in input_dict.items():\n if (len(input_dict[key]) > max_len):\n raise ValueError(f'''Truncated seq length of '{key}' still greater than max length {max_len}.\"\\\n \"One possible reason is that no enough shortenable parts in template. Try adding {{\"shortenable\": \"True\"}} property.\n ''')\n if 'input' in key:\n input_dict[key].extend([pad_id_for_inputs]*(max_len-len(value)))\n else:\n input_dict[key].extend([pad_id_for_others]*(max_len-len(value)))\n return input_dict\n\n\n def add_special_tokens(self, encoder_inputs):\n # add special tokens\n for key in encoder_inputs:\n if key == \"input_ids\":\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n encoder_inputs[key] = self.tokenizer.build_inputs_with_special_tokens(\n encoder_inputs[key])\n else:\n special_tokens_mask = np.array(self.tokenizer.get_special_tokens_mask(encoder_inputs[key]))\n with_special_tokens = np.array(self.tokenizer.build_inputs_with_special_tokens(encoder_inputs[key]))\n if key in [\"soft_token_ids\"]: # TODO maybe more than this\n encoder_inputs[key] = ((1-special_tokens_mask) * with_special_tokens).tolist() # use 0 as special\n else:\n encoder_inputs[key] = ((1-special_tokens_mask) * with_special_tokens - special_tokens_mask*100).tolist() # use -100 as special\n return encoder_inputs\n\n def truncate(self, encoder_inputs):\n total_tokens = sum([len(part) for part in encoder_inputs['input_ids']])\n num_specials = self.num_special_tokens_to_add\n num_tokens_to_truncate = total_tokens - self.max_seq_length + num_specials\n self.total_passed_sentences+=1\n if num_tokens_to_truncate>0:\n self.num_truncated_sentences += 1\n encoder_inputs = self.truncate_fct(input_dict=encoder_inputs,\n num_tokens_to_truncate=num_tokens_to_truncate)\n return encoder_inputs" }, { "identifier": "Template", "path": "src/prompt-tuning/prompt/prompt_base.py", "snippet": "class Template(nn.Module):\n r'''\n Base class for all the templates.\n Most of methods are abstract, with some exceptions to hold the common methods for all template, such as ``loss_ids``, ``save``, ``load``.\n\n Args:\n tokenizer (:obj:`PreTrainedTokenizer`): A tokenizer to appoint the vocabulary and the tokenization strategy.\n placeholder_mapping (:obj:`dict`): A place holder to represent the original input text.\n '''\n\n registered_inputflag_names = [\"loss_ids\", \"shortenable_ids\"]\n\n def __init__(self,\n tokenizer: PreTrainedTokenizer,\n placeholder_mapping: dict = {'<text_a>':'text_a','<text_b>':'text_b'},\n 
):\n super().__init__()\n self.tokenizer = tokenizer\n self.placeholder_mapping = placeholder_mapping\n self._in_on_text_set = False\n\n self.mixed_token_start = \"{\"\n self.mixed_token_end = \"}\"\n\n\n def get_default_loss_ids(self) -> List[int]:\n '''Get the loss indices for the template using mask.\n e.g. when self.text is ``'{\"placeholder\": \"text_a\"}. {\"meta\": \"word\"} is {\"mask\"}.'``,\n output is ``[0, 0, 0, 0, 1, 0]``.\n\n Returns:\n :obj:`List[int]`: A list of integers in the range [0, 1]:\n\n - 1 for a masked tokens.\n - 0 for a sequence tokens.\n '''\n return [1 if 'mask' in d else 0 for d in self.text]\n\n def get_default_shortenable_ids(self) -> List[int]:\n \"\"\"Every template needs shortenable_ids, denoting which part of the template can be truncate to fit\n the language model's ``max_seq_length``. Default: the input text is shortenable, while the template text and other\n special tokens are not shortenable.\n\n e.g. when self.text is ``'{\"placeholder\": \"text_a\"} {\"placeholder\": \"text_b\", \"shortenable\": False} {\"meta\": \"word\"} is {\"mask\"}.'``,\n output is ``[1, 0, 0, 0, 0, 0, 0]``.\n\n Returns:\n :obj:`List[int]`: A list of integers in the range ``[0, 1]``:\n\n - 1 for the input tokens.\n - 0 for the template sequence tokens.\n \"\"\"\n idx = []\n for d in self.text:\n if 'shortenable' in d:\n idx.append(1 if d['shortenable'] else 0)\n else:\n idx.append(1 if 'placeholder' in d else 0)\n return idx\n\n def get_default_soft_token_ids(self) -> List[int]:\n r'''\n This function identifies which tokens are soft tokens.\n\n Sometimes tokens in the template are not from the vocabulary,\n but a sequence of soft tokens.\n In this case, you need to implement this function\n\n Raises:\n NotImplementedError: if needed, add ``soft_token_ids`` into ``registered_inputflag_names`` attribute of Template class and implement this method.\n '''\n raise NotImplementedError\n\n def incorporate_text_example(self,\n example: InputExample,\n text = None,\n ):\n if text is None:\n text = self.text.copy()\n else:\n text = text.copy()\n\n for i, d in enumerate(text):\n if 'placeholder' in d:\n text[i] = d[\"add_prefix_space\"] + d.get(\"post_processing\", lambda x:x)(getattr(example, d['placeholder']))\n elif 'meta' in d:\n text[i] = d[\"add_prefix_space\"] + d.get(\"post_processing\", lambda x:x)(example.meta[d['meta']])\n elif 'soft' in d:\n text[i] = ''; # unused\n elif 'mask' in d:\n text[i] = '<mask>'\n elif 'special' in d:\n text[i] = d['special']\n elif 'text' in d:\n text[i] = d[\"add_prefix_space\"] + d['text']\n else:\n raise ValueError(f'can not parse {d}')\n return text\n\n def _check_template_format(self, ):\n r\"\"\"check whether the template format is correct.\n TODO: add more\n \"\"\"\n mask_num = 0\n for i, d in enumerate(self.text):\n if 'mask' in d:\n mask_num += 1\n\n if mask_num==0:\n raise RuntimeError(f\"'mask' position not found in the template: {self.text}. 
Please Check!\")\n\n\n\n\n def parse_text(self, text: str) -> List[Dict]:\n parsed = []\n i = 0\n while i < len(text):\n d = {\"add_prefix_space\": ' ' if (i > 0 and text[i-1] == ' ') else ''}\n while i < len(text) and text[i] == ' ':\n d[\"add_prefix_space\"] = ' '\n i = i + 1\n if i == len(text): break\n\n if text[i] != self.mixed_token_start:\n j = i + 1\n while j < len(text):\n if text[j] == self.mixed_token_start:\n break\n j = j + 1\n d[\"text\"] = text[i:j].rstrip(' ')\n i = j\n\n else:\n j = i + 1\n mixed_token_cnt = 1 # { {} {} } nested support\n while j < len(text):\n if text[j] == self.mixed_token_end:\n mixed_token_cnt -= 1\n if mixed_token_cnt == 0: break\n elif text[j] == self.mixed_token_start:\n mixed_token_cnt += 1\n j = j + 1\n if j == len(text):\n raise ValueError(f\"mixed_token_start {self.mixed_token_start} at position {i} has no corresponding mixed_token_end {self.mixed_token_end}\")\n dict_str = '{'+text[i+1:j]+'}'\n try:\n val = eval(dict_str)\n if isinstance(val, set):\n val = {k: None for k in val}\n d.update(val)\n except:\n import traceback\n print(traceback.format_exc())\n print(f\"syntax error in {dict_str}\")\n exit()\n i = j + 1\n\n parsed.append(d)\n\n return parsed\n\n # @abstractmethod\n def wrap_one_example(self,\n example: InputExample) -> List[Dict]:\n r'''Given an input example which contains input text, which can be referenced\n by self.template.placeholder_mapping 's value.\n This function process the example into a list of dict,\n Each dict functions as a group, which has the sample properties, such as\n whether it's shortenable, whether it's the masked position, whether it's soft token, etc.\n Since a text will be tokenized in the subsequent processing procedure,\n these attributes are broadcasted along the tokenized sentence.\n\n Args:\n example (:obj:`InputExample`): An :py:class:`~openprompt.data_utils.data_utils.InputExample` object, which should have attributes that are able to be filled in the template.\n\n Returns:\n :obj:`List[Dict]`: A list of dict of the same length as self.text. e.g. ``[{\"loss_ids\": 0, \"text\": \"It was\"}, {\"loss_ids\": 1, \"text\": \"<mask>\"}, ]``\n '''\n\n if self.text is None:\n raise ValueError(\"template text has not been initialized\")\n if isinstance(example, InputExample):\n text = self.incorporate_text_example(example)\n\n not_empty_keys = example.keys()\n for placeholder_token in self.placeholder_mapping:\n not_empty_keys.remove(self.placeholder_mapping[placeholder_token]) # placeholder has been processed, remove\n not_empty_keys.remove('meta') # meta has been processed\n\n keys, values= ['text'], [text]\n for inputflag_name in self.registered_inputflag_names:\n keys.append(inputflag_name)\n v = None\n if hasattr(self, inputflag_name) and getattr(self, inputflag_name) is not None:\n v = getattr(self, inputflag_name)\n elif hasattr(self, \"get_default_\"+inputflag_name):\n v = getattr(self, \"get_default_\"+inputflag_name)()\n setattr(self, inputflag_name, v) # cache\n else:\n raise ValueError(\"\"\"\n Template's inputflag '{}' is registered but not initialize.\n Try using template.{} = [...] 
to initialize\n or create an method get_default_{}(self) in your template.\n \"\"\".format(inputflag_name, inputflag_name, inputflag_name))\n\n if len(v) != len(text):\n raise ValueError(\"Template: len({})={} doesn't match len(text)={}.\"\\\n .format(inputflag_name, len(v), len(text)))\n values.append(v)\n wrapped_parts_to_tokenize = []\n for piece in list(zip(*values)):\n wrapped_parts_to_tokenize.append(dict(zip(keys, piece)))\n\n wrapped_parts_not_tokenize = {key: getattr(example, key) for key in not_empty_keys}\n return [wrapped_parts_to_tokenize, wrapped_parts_not_tokenize]\n else:\n raise TypeError(\"InputExample\")\n\n @abstractmethod\n def process_batch(self, batch):\n r\"\"\"Template should rewrite this method if you need to process the batch input such as substituting embeddings.\n \"\"\"\n return batch # not being processed\n\n def post_processing_outputs(self, outputs):\n r\"\"\"Post processing the outputs of language models according\n to the need of template. Most templates don't need post processing,\n The template like SoftTemplate, which appends soft template as a module\n (rather than a sequence of input tokens) to the input,\n should remove the outputs on these positions to keep the seq_len the same\n \"\"\"\n return outputs\n\n def save(self,\n path: str,\n **kwargs) -> None:\n r'''\n A save method API.\n\n Args:\n path (str): A path to save your template.\n '''\n raise NotImplementedError\n\n @property\n def text(self):\n return self._text\n\n @text.setter\n def text(self, text):\n self._text = text\n if text is None:\n return\n if not self._in_on_text_set:\n self.safe_on_text_set()\n self._check_template_format()\n\n def safe_on_text_set(self) -> None:\n r\"\"\"With this wrapper function, setting text inside ``on_text_set()``\n will not trigger ``on_text_set()`` again to prevent endless recursion.\n \"\"\"\n self._in_on_text_set = True\n self.on_text_set()\n self._in_on_text_set = False\n\n @abstractmethod\n def on_text_set(self):\n r\"\"\"\n A hook to do something when template text was set.\n The designer of the template should explicitly know what should be down when the template text is set.\n \"\"\"\n raise NotImplementedError\n\n def from_file(self,\n path: str,\n choice: int = 0,\n ):\n r'''\n Read the template from a local file.\n\n Args:\n path (:obj:`str`): The path of the local template file.\n choice (:obj:`int`): The id-th line of the file.\n '''\n with open(path, 'r') as fin:\n text = fin.readlines()[choice].rstrip()\n self.text = text\n return self" }, { "identifier": "Verbalizer", "path": "src/prompt-tuning/prompt/prompt_base.py", "snippet": "class Verbalizer(nn.Module):\n r'''\n Base class for all the verbalizers.\n\n Args:\n tokenizer (:obj:`PreTrainedTokenizer`): A tokenizer to appoint the vocabulary and the tokenization strategy.\n classes (:obj:`Sequence[str]`): A sequence of classes that need to be projected.\n '''\n def __init__(self,\n tokenizer: Optional[PreTrainedTokenizer] = None,\n classes: Optional[Sequence[str]] = None,\n num_classes: Optional[int] = None,\n ):\n super().__init__()\n self.tokenizer = tokenizer\n self.classes = classes\n if classes is not None and num_classes is not None:\n assert len(classes) == num_classes, \"len(classes) != num_classes, Check you config.\"\n self.num_classes = num_classes\n elif num_classes is not None:\n self.num_classes = num_classes\n elif classes is not None:\n self.num_classes = len(classes)\n else:\n self.num_classes = None\n # raise AttributeError(\"No able to configure num_classes\")\n 
self._in_on_label_words_set = False\n\n @property\n def label_words(self,):\n r'''\n Label words means the words in the vocabulary projected by the labels.\n E.g. if we want to establish a projection in sentiment classification: positive :math:`\\rightarrow` {`wonderful`, `good`},\n in this case, `wonderful` and `good` are label words.\n '''\n if not hasattr(self, \"_label_words\"):\n raise RuntimeError(\"label words haven't been set.\")\n return self._label_words\n\n @label_words.setter\n def label_words(self, label_words):\n if label_words is None:\n return\n self._label_words = self._match_label_words_to_label_ids(label_words)\n if not self._in_on_label_words_set:\n self.safe_on_label_words_set()\n\n def _match_label_words_to_label_ids(self, label_words): # TODO newly add function after docs written # TODO rename this function\n \"\"\"\n sort label words dict of verbalizer to match the label order of the classes\n \"\"\"\n if isinstance(label_words, dict):\n if self.classes is None:\n raise ValueError(\"\"\"\n classes attribute of the Verbalizer should be set since your given label words is a dict.\n Since we will match the label word with respect to class A, to A's index in classes\n \"\"\")\n if set(label_words.keys()) != set(self.classes):\n raise ValueError(\"name of classes in verbalizer are different from those of dataset\")\n label_words = [ # sort the dict to match dataset\n label_words[c]\n for c in self.classes\n ] # length: label_size of the whole task\n elif isinstance(label_words, list) or isinstance(label_words, tuple):\n pass\n # logger.info(\"\"\"\n # Your given label words is a list, by default, the ith label word in the list will match class i of the dataset.\n # Please make sure that they have the same order.\n # Or you can pass label words as a dict, mapping from class names to label words.\n # \"\"\")\n else:\n raise ValueError(\"Verbalizer label words must be list, tuple or dict\")\n return label_words\n\n def safe_on_label_words_set(self,):\n self._in_on_label_words_set = True\n self.on_label_words_set()\n self._in_on_label_words_set = False\n\n def on_label_words_set(self,):\n r\"\"\"A hook to do something when textual label words were set.\n \"\"\"\n pass\n\n @property\n def vocab(self,) -> Dict:\n if not hasattr(self, '_vocab'):\n self._vocab = self.tokenizer.convert_ids_to_tokens(np.arange(self.vocab_size).tolist())\n return self._vocab\n\n @property\n def vocab_size(self,) -> int:\n return self.tokenizer.vocab_size\n\n @abstractmethod\n def generate_parameters(self, **kwargs) -> List:\n r\"\"\"\n The verbalizer can be seen as an extra layer on top of the original\n pre-trained models. 
In manual verbalizer, it is a fixed one-hot vector of dimension\n ``vocab_size``, with the position of the label word being 1 and 0 everywhere else.\n In other situation, the parameters may be a continuous vector over the\n vocab, with each dimension representing a weight of that token.\n Moreover, the parameters may be set to trainable to allow label words selection.\n\n Therefore, this function serves as an abstract methods for generating the parameters\n of the verbalizer, and must be instantiated in any derived class.\n\n Note that the parameters need to be registered as a part of pytorch's module to\n It can be achieved by wrapping a tensor using ``nn.Parameter()``.\n \"\"\"\n raise NotImplementedError\n\n def register_calibrate_logits(self, logits: torch.Tensor):\n r\"\"\"\n This function aims to register logits that need to be calibrated, and detach the original logits from the current graph.\n \"\"\"\n if logits.requires_grad:\n logits = logits.detach()\n self._calibrate_logits = logits\n\n def process_outputs(self,\n outputs: torch.Tensor,\n conn_linear_logits = None, \n **kwargs):\n r\"\"\"By default, the verbalizer will process the logits of the PLM's\n output.\n\n Args:\n logits (:obj:`torch.Tensor`): The current logits generated by pre-trained language models.\n batch (:obj:`Union[Dict, InputFeatures]`): The input features of the data.\n \"\"\"\n if conn_linear_logits != None:\n return self.process_logits(outputs, conn_linear_logits, **kwargs)\n else:\n return self.process_logits(outputs, **kwargs)\n\n def gather_outputs(self, outputs: ModelOutput):\n r\"\"\" retrieve useful output for the verbalizer from the whole model output\n By default, it will only retrieve the logits\n\n Args:\n outputs (:obj:`ModelOutput`) The output from the pretrained language model.\n\n Return:\n :obj:`torch.Tensor` The gathered output, should be of shape (``batch_size``,\n ``seq_len``, ``any``)\n \"\"\"\n return outputs.logits\n\n @staticmethod\n def aggregate(label_words_logits: torch.Tensor) -> torch.Tensor:\n r\"\"\" To aggregate logits on multiple label words into the label's logits\n Basic aggregator: mean of each label words' logits to a label's logits\n Can be re-implemented in advanced verbaliezer.\n\n Args:\n label_words_logits (:obj:`torch.Tensor`): The logits of the label words only.\n\n Return:\n :obj:`torch.Tensor`: The final logits calculated by the label words.\n \"\"\"\n if label_words_logits.dim()>2:\n return label_words_logits.mean(dim=-1)\n else:\n return label_words_logits\n\n\n def normalize(self, logits: torch.Tensor) -> torch.Tensor:\n r\"\"\"\n Given logits regarding the entire vocab, calculate the probs over the label words set by softmax.\n\n Args:\n logits(:obj:`Tensor`): The logits of the entire vocab.\n\n Returns:\n :obj:`Tensor`: The probability distribution over the label words set.\n \"\"\"\n batch_size = logits.shape[0]\n return F.softmax(logits.reshape(batch_size, -1), dim=-1).reshape(*logits.shape)\n\n @abstractmethod\n def project(self,\n logits: torch.Tensor,\n **kwargs) -> torch.Tensor:\n r\"\"\"This method receives input logits of shape ``[batch_size, vocab_size]``, and use the\n parameters of this verbalizer to project the logits over entire vocab into the\n logits of labels words.\n\n Args:\n logits (:obj:`Tensor`): The logits over entire vocab generated by the pre-trained language model with shape [``batch_size``, ``max_seq_length``, ``vocab_size``]\n\n Returns:\n :obj:`Tensor`: The normalized probs (sum to 1) of each label .\n \"\"\"\n raise 
NotImplementedError\n\n def handle_multi_token(self, label_words_logits, mask):\n r\"\"\"\n Support multiple methods to handle the multi tokens produced by the tokenizer.\n We suggest using 'first' or 'max' if the some parts of the tokenization is not meaningful.\n Can broadcast to 3-d tensor.\n\n Args:\n label_words_logits (:obj:`torch.Tensor`):\n\n Returns:\n :obj:`torch.Tensor`\n \"\"\"\n if self.multi_token_handler == \"first\":\n label_words_logits = label_words_logits.select(dim=-1, index=0)\n elif self.multi_token_handler == \"max\":\n label_words_logits = label_words_logits - 1000*(1-mask.unsqueeze(0))\n label_words_logits = label_words_logits.max(dim=-1).values\n elif self.multi_token_handler == \"mean\":\n label_words_logits = (label_words_logits*mask.unsqueeze(0)).sum(dim=-1)/(mask.unsqueeze(0).sum(dim=-1)+1e-15)\n else:\n raise ValueError(\"multi_token_handler {} not configured\".format(self.multi_token_handler))\n return label_words_logits\n\n @classmethod\n \n\n def from_file(self,\n path: str,\n choice: Optional[int] = 0 ):\n r\"\"\"Load the predefined label words from verbalizer file.\n Currently support three types of file format:\n 1. a .jsonl or .json file, in which is a single verbalizer\n in dict format.\n 2. a .jsonal or .json file, in which is a list of verbalizers in dict format\n 3. a .txt or a .csv file, in which is the label words of a class are listed in line,\n separated by commas. Begin a new verbalizer by an empty line.\n This format is recommended when you don't know the name of each class.\n\n The details of verbalizer format can be seen in :ref:`How_to_write_a_verbalizer`.\n\n Args:\n path (:obj:`str`): The path of the local template file.\n choice (:obj:`int`): The choice of verbalizer in a file containing\n multiple verbalizers.\n\n Returns:\n Template : `self` object\n \"\"\"\n if path.endswith(\".txt\") or path.endswith(\".csv\"):\n with open(path, 'r') as f:\n lines = f.readlines()\n label_words_all = []\n label_words_single_group = []\n for line in lines:\n line = line.strip().strip(\" \")\n if line == \"\":\n if len(label_words_single_group)>0:\n label_words_all.append(label_words_single_group)\n label_words_single_group = []\n else:\n label_words_single_group.append(line)\n if len(label_words_single_group) > 0: # if no empty line in the last\n label_words_all.append(label_words_single_group)\n if choice >= len(label_words_all):\n raise RuntimeError(\"choice {} exceed the number of verbalizers {}\"\n .format(choice, len(label_words_all)))\n\n label_words = label_words_all[choice]\n label_words = [label_words_per_label.strip().split(\",\") \\\n for label_words_per_label in label_words]\n\n elif path.endswith(\".jsonl\") or path.endswith(\".json\"):\n with open(path, \"r\") as f:\n label_words_all = json.load(f)\n # if it is a file containing multiple verbalizers\n if isinstance(label_words_all, list):\n if choice >= len(label_words_all):\n raise RuntimeError(\"choice {} exceed the number of verbalizers {}\"\n .format(choice, len(label_words_all)))\n label_words = label_words_all[choice]\n elif isinstance(label_words_all, dict):\n label_words = label_words_all\n if choice>0:\n print(\"Choice of verbalizer is 1, but the file \\\n only contains one verbalizer.\")\n\n self.label_words = label_words\n if self.num_classes is not None:\n num_classes = len(self.label_words)\n assert num_classes==self.num_classes, 'number of classes in the verbalizer file\\\n does not match the predefined num_classes.'\n return self" } ]
from pickle import FALSE from torch.utils.data.sampler import RandomSampler from transformers.configuration_utils import PretrainedConfig from transformers.generation_utils import GenerationMixin from torch.utils.data import Dataset from typing import * from .data_utils import InputExample, InputFeatures from torch.utils.data._utils.collate import default_collate from tqdm.std import tqdm from transformers.tokenization_utils import PreTrainedTokenizer from transformers.utils.dummy_pt_objects import PreTrainedModel from .utils import TokenizerWrapper from .prompt_base import Template, Verbalizer from collections import defaultdict from collections import namedtuple from torch.utils.data import DataLoader import torch import torch.nn as nn import inspect import numpy as np
10,960
def signature(f): r"""Get the function f 's input arguments. A useful gadget when some function slot might be instantiated into multiple functions. Args: f (:obj:`function`) : the function to get the input arguments. Returns: namedtuple : of args, default, varargs, keywords, respectively.s """ sig = inspect.signature(f) args = [ p.name for p in sig.parameters.values() if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD ] varargs = [ p.name for p in sig.parameters.values() if p.kind == inspect.Parameter.VAR_POSITIONAL ] varargs = varargs[0] if varargs else None keywords = [ p.name for p in sig.parameters.values() if p.kind == inspect.Parameter.VAR_KEYWORD ] keywords = keywords[0] if keywords else None defaults = [ p.default for p in sig.parameters.values() if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD and p.default is not p.empty ] or None argspec = namedtuple('Signature', ['args', 'defaults', 'varargs', 'keywords']) return argspec(args, defaults, varargs, keywords) class PromptDataLoader(object): r""" PromptDataLoader wraps the original dataset. The input data is firstly wrapped with the prompt's template, and then is tokenized by a wrapperd-tokenizer. Args: dataset (:obj:`Dataset` or :obj:`List`): Either a DatasetObject or a list containing the input examples. template (:obj:`Template`): A derived class of :obj:`Template` tokenizer (:obj:`PretrainedTokenizer`): The pretrained tokenizer. tokenizer_wrapper_class (:cls:`TokenizerWrapper`): The class of tokenizer wrapper. max_seq_length (:obj:`int`, optional): The max sequence length of the input ids. It's used to truncate sentences. batch_size (:obj:`int`, optional): The batch_size of data loader teacher_forcing (:obj:`bool`, optional): Whether to fill the mask with target text. Set to true in training generation model. decoder_max_length (:obj:`int`, optional): the decoder maximum length of an encoder-decoder model. predict_eos_token (:obj:`bool`, optional): Whether to predict the <eos> token. Suggest to set to true in generation. truncate_method (:obj:`bool`, optional): the truncate method to use. select from `head`, `tail`, `balanced`. kwargs :Other kwargs that might be passed into a tokenizer wrapper. """ def __init__(self, dataset: Union[Dataset, List], template: Template, tokenizer_wrapper: Optional[TokenizerWrapper] = None, tokenizer: PreTrainedTokenizer = None, tokenizer_wrapper_class = None,
def signature(f): r"""Get the function f 's input arguments. A useful gadget when some function slot might be instantiated into multiple functions. Args: f (:obj:`function`) : the function to get the input arguments. Returns: namedtuple : of args, default, varargs, keywords, respectively.s """ sig = inspect.signature(f) args = [ p.name for p in sig.parameters.values() if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD ] varargs = [ p.name for p in sig.parameters.values() if p.kind == inspect.Parameter.VAR_POSITIONAL ] varargs = varargs[0] if varargs else None keywords = [ p.name for p in sig.parameters.values() if p.kind == inspect.Parameter.VAR_KEYWORD ] keywords = keywords[0] if keywords else None defaults = [ p.default for p in sig.parameters.values() if p.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD and p.default is not p.empty ] or None argspec = namedtuple('Signature', ['args', 'defaults', 'varargs', 'keywords']) return argspec(args, defaults, varargs, keywords) class PromptDataLoader(object): r""" PromptDataLoader wraps the original dataset. The input data is firstly wrapped with the prompt's template, and then is tokenized by a wrapperd-tokenizer. Args: dataset (:obj:`Dataset` or :obj:`List`): Either a DatasetObject or a list containing the input examples. template (:obj:`Template`): A derived class of :obj:`Template` tokenizer (:obj:`PretrainedTokenizer`): The pretrained tokenizer. tokenizer_wrapper_class (:cls:`TokenizerWrapper`): The class of tokenizer wrapper. max_seq_length (:obj:`int`, optional): The max sequence length of the input ids. It's used to truncate sentences. batch_size (:obj:`int`, optional): The batch_size of data loader teacher_forcing (:obj:`bool`, optional): Whether to fill the mask with target text. Set to true in training generation model. decoder_max_length (:obj:`int`, optional): the decoder maximum length of an encoder-decoder model. predict_eos_token (:obj:`bool`, optional): Whether to predict the <eos> token. Suggest to set to true in generation. truncate_method (:obj:`bool`, optional): the truncate method to use. select from `head`, `tail`, `balanced`. kwargs :Other kwargs that might be passed into a tokenizer wrapper. """ def __init__(self, dataset: Union[Dataset, List], template: Template, tokenizer_wrapper: Optional[TokenizerWrapper] = None, tokenizer: PreTrainedTokenizer = None, tokenizer_wrapper_class = None,
verbalizer: Optional[Verbalizer] = None,
4
2023-11-01 08:52:36+00:00
16k
choderalab/chiron
Examples/LJ_langevin.py
[ { "identifier": "LJPotential", "path": "chiron/potential.py", "snippet": "class LJPotential(NeuralNetworkPotential):\n def __init__(\n self,\n topology: Topology,\n sigma: unit.Quantity = 3.350 * unit.angstroms,\n epsilon: unit.Quantity = 1.0 * unit.kilocalories_per_mole,\n cutoff: unit.Quantity = unit.Quantity(1.0, unit.nanometer),\n ):\n \"\"\"\n Initialize the Lennard-Jones potential.\n\n Parameters\n ----------\n topology : Topology\n The topology of the system\n sigma : unit.Quantity, optional\n The distance at which the potential is zero, by default 3.350 * unit.angstroms\n epsilon : unit.Quantity, optional\n The depth of the potential well, by default 1.0 * unit.kilocalories_per_mole\n cutoff : unit.Quantity, optional\n The cutoff distance for the potential, by default 1.0 * unit.nanometer\n\n \"\"\"\n\n if not isinstance(topology, Topology):\n if not isinstance(topology, property):\n if topology is not None:\n raise TypeError(\n f\"Topology must be a Topology object or None, type(topology) = {type(topology)}\"\n )\n if not isinstance(sigma, unit.Quantity):\n raise TypeError(\n f\"sigma must be a unit.Quantity, type(sigma) = {type(sigma)}\"\n )\n if not isinstance(epsilon, unit.Quantity):\n raise TypeError(\n f\"epsilon must be a unit.Quantity, type(epsilon) = {type(epsilon)}\"\n )\n if not isinstance(cutoff, unit.Quantity):\n raise TypeError(\n f\"cutoff must be a unit.Quantity, type(cutoff) = {type(cutoff)}\"\n )\n\n if not sigma.unit.is_compatible(unit.angstrom):\n raise ValueError(f\"sigma must have units of distance, got {sigma.unit}\")\n if not epsilon.unit.is_compatible(unit.kilocalories_per_mole):\n raise ValueError(f\"epsilon must have units of energy, got {epsilon.unit}\")\n if not cutoff.unit.is_compatible(unit.nanometer):\n raise ValueError(f\"cutoff must have units of distance, got {cutoff.unit}\")\n\n self.sigma = sigma.value_in_unit_system(\n unit.md_unit_system\n ) # The distance at which the potential is zero\n self.epsilon = epsilon.value_in_unit_system(\n unit.md_unit_system\n ) # The depth of the potential well\n # The cutoff for a potential is often linked with the parameters and isn't really\n # something I think we should be changing dynamically.\n self.cutoff = cutoff.value_in_unit_system(unit.md_unit_system)\n self.topology = topology\n\n from functools import partial\n\n @partial(jax.jit, static_argnums=(0,))\n def _compute_energy_masked(self, distance, mask):\n \"\"\"\n Compute the LJ energy based on an array representing the distances between a given particle and its neighbors.\n Since the distance array is padded to a fixed length, we need to mask out the padded values before summing the energy.\n\n Parameters\n ----------\n distance : jnp.array\n The distances between a given particle and its neighbors\n mask : jnp.array\n An array indicating which values in the distance array are valid and which are padded [1.0 or 0.0]\n \"\"\"\n\n # we can just multiply by the mask rather than using jnp.where to mask.\n energy = mask * (\n 4\n * self.epsilon\n * ((self.sigma / distance) ** 12 - (self.sigma / distance) ** 6)\n )\n return energy.sum()\n\n def compute_energy(self, positions: jnp.array, nbr_list=None, debug_mode=False):\n \"\"\"\n Compute the LJ energy.\n\n Parameters\n ----------\n positions : jnp.array\n The positions of the particles in the system\n nbr_list : NeighborList, default=None\n Instance of a neighbor list or pair list class to use.\n If None, an unoptimized N^2 pairlist will be used without PBC conditions.\n Returns\n -------\n 
potential_energy : float\n The total potential energy of the system.\n\n \"\"\"\n # Compute the pair distances and displacement vectors\n\n if nbr_list is None:\n log.debug(\n \"nbr_list is None, computing using inefficient N^2 pairlist without PBC.\"\n )\n # Compute the pairlist for a given set of positions and a cutoff distance\n # Note in this case, we do not need the pairs or displacement vectors\n # Since we already calculate the distance in the pairlist computation\n # Pairs and displacement vectors are needed for an analytical evaluation of the force\n # which we will do as part of testing\n distances, displacement_vectors, pairs = self.compute_pairlist(\n positions, self.cutoff\n )\n # if our pairlist is empty, the particles are non-interacting and\n # the energy will be 0\n if distances.shape[0] == 0:\n return 0.0\n\n potential_energy = (\n 4\n * self.epsilon\n * ((self.sigma / distances) ** 12 - (self.sigma / distances) ** 6)\n )\n # sum over all pairs to get the total potential energy\n return potential_energy.sum()\n\n else:\n # ensure the neighborlist has been constructed before trying to use it\n\n if not nbr_list.is_built:\n raise ValueError(\"Neighborlist must be built before use\")\n\n # ensure that the cutoff in the neighbor list is the same as the cutoff in the potential\n if nbr_list.cutoff != self.cutoff:\n raise ValueError(\n f\"Neighborlist cutoff ({nbr_list.cutoff}) must be the same as the potential cutoff ({self.cutoff})\"\n )\n\n n_neighbors, pairs, mask, dist, displacement_vectors = nbr_list.calculate(\n positions\n )\n\n potential_energy = jax.vmap(self._compute_energy_masked, in_axes=(0))(\n dist, mask.astype(jnp.float32)\n )\n return potential_energy.sum()\n\n def compute_force(self, positions: jnp.array, nbr_list=None) -> jnp.array:\n \"\"\"\n Compute the LJ force using the negative of jax.grad.\n\n Parameters\n ----------\n positions : jnp.array\n The positions of the particles in the system\n nbr_list : NeighborList, optional\n Instance of the neighborlist class to use. 
By default, set to None, which will use an N^2 pairlist\n\n Returns\n -------\n force : jnp.array\n The forces on the particles in the system\n\n \"\"\"\n # force = -jax.grad(self.compute_energy)(positions, nbr_list)\n # return force\n return super().compute_force(positions, nbr_list=nbr_list)\n\n def compute_force_analytical(\n self,\n positions: jnp.array,\n ) -> jnp.array:\n \"\"\"\n Compute the LJ force using the analytical expression for testing purposes.\n\n Parameters\n ----------\n positions : jnp.array\n The positions of the particles in the system\n\n Returns\n -------\n force : jnp.array\n The forces on the particles in the system\n\n \"\"\"\n dist, displacement_vector, pairs = self.compute_pairlist(positions, self.cutoff)\n\n forces = (\n 24\n * (self.epsilon / (dist * dist))\n * (2 * (self.sigma / dist) ** 12 - (self.sigma / dist) ** 6)\n ).reshape(-1, 1) * displacement_vector\n\n force_array = jnp.zeros((positions.shape[0], 3))\n for force, p1, p2 in zip(forces, pairs[0], pairs[1]):\n force_array = force_array.at[p1].add(force)\n force_array = force_array.at[p2].add(-force)\n return force_array" }, { "identifier": "SamplerState", "path": "chiron/states.py", "snippet": "class SamplerState:\n \"\"\"\n Represents the state of the system that is updated during integration.\n\n Parameters\n ----------\n x0 : unit.Quantity\n The current positions of the particles in the simulation.\n velocities : unit.Quantity, optional\n The velocities of the particles in the simulation.\n box_vectors : unit.Quantity, optional\n The box vectors defining the simulation's periodic boundary conditions.\n\n \"\"\"\n\n def __init__(\n self,\n x0: unit.Quantity,\n velocities: Optional[unit.Quantity] = None,\n box_vectors: Optional[unit.Quantity] = None,\n ) -> None:\n # NOTE: all units are internally in the openMM units system as documented here:\n # http://docs.openmm.org/latest/userguide/theory/01_introduction.html#units\n if not isinstance(x0, unit.Quantity):\n raise TypeError(f\"x0 must be a unit.Quantity, got {type(x0)} instead.\")\n if velocities is not None and not isinstance(velocities, unit.Quantity):\n raise TypeError(\n f\"velocities must be a unit.Quantity, got {type(velocities)} instead.\"\n )\n if box_vectors is not None and not isinstance(box_vectors, unit.Quantity):\n if isinstance(box_vectors, List):\n try:\n box_vectors = self._convert_from_openmm_box(box_vectors)\n except:\n raise TypeError(f\"Unable to parse box_vectors {box_vectors}.\")\n else:\n raise TypeError(\n f\"box_vectors must be a unit.Quantity or openMM box, got {type(box_vectors)} instead.\"\n )\n if not x0.unit.is_compatible(unit.nanometer):\n raise ValueError(f\"x0 must have units of distance, got {x0.unit} instead.\")\n if velocities is not None and not velocities.unit.is_compatible(\n unit.nanometer / unit.picosecond\n ):\n raise ValueError(\n f\"velocities must have units of distance/time, got {velocities.unit} instead.\"\n )\n if box_vectors is not None and not box_vectors.unit.is_compatible(\n unit.nanometer\n ):\n raise ValueError(\n f\"box_vectors must have units of distance, got {box_vectors.unit} instead.\"\n )\n if box_vectors is not None and box_vectors.shape != (3, 3):\n raise ValueError(\n f\"box_vectors must be a 3x3 array, got {box_vectors.shape} instead.\"\n )\n\n self._x0 = x0\n self._velocities = velocities\n self._box_vectors = box_vectors\n self._distance_unit = unit.nanometer\n\n @property\n def x0(self) -> jnp.array:\n return self._convert_to_jnp(self._x0)\n\n @property\n def velocities(self) -> 
jnp.array:\n if self._velocities is None:\n return None\n return self._convert_to_jnp(self._velocities)\n\n @property\n def box_vectors(self) -> jnp.array:\n if self._box_vectors is None:\n return None\n return self._convert_to_jnp(self._box_vectors)\n\n @x0.setter\n def x0(self, x0: Union[jnp.array, unit.Quantity]) -> None:\n if isinstance(x0, unit.Quantity):\n self._x0 = x0\n else:\n self._x0 = unit.Quantity(x0, self._distance_unit)\n\n @property\n def distance_unit(self) -> unit.Unit:\n return self._distance_unit\n\n def _convert_to_jnp(self, array: unit.Quantity) -> jnp.array:\n \"\"\"\n Convert the sampler state to jnp arrays.\n \"\"\"\n import jax.numpy as jnp\n\n array_ = array.value_in_unit_system(unit.md_unit_system)\n return jnp.array(array_)\n\n def _convert_from_openmm_box(self, openmm_box_vectors: List) -> unit.Quantity:\n box_vec = []\n for i in range(0, 3):\n layer = []\n for j in range(0, 3):\n layer.append(\n openmm_box_vectors[i][j].value_in_unit(openmm_box_vectors[0].unit)\n )\n box_vec.append(layer)\n return unit.Quantity(jnp.array(box_vec), openmm_box_vectors[0].unit)" }, { "identifier": "ThermodynamicState", "path": "chiron/states.py", "snippet": "class ThermodynamicState:\n \"\"\"\n Represents the thermodynamic state of the system.\n\n Parameters\n ----------\n potential : NeuralNetworkPotential\n The potential energy function of the system.\n temperature : unit.Quantity, optional\n The temperature of the simulation.\n volume : unit.Quantity, optional\n The volume of the simulation.\n pressure : unit.Quantity, optional\n The pressure of the simulation.\n\n \"\"\"\n\n def __init__(\n self,\n potential: Optional[NeuralNetworkPotential],\n temperature: Optional[unit.Quantity] = None,\n volume: Optional[unit.Quantity] = None,\n pressure: Optional[unit.Quantity] = None,\n ):\n self.potential = potential\n\n if temperature is not None and not isinstance(temperature, unit.Quantity):\n raise TypeError(\n f\"temperature must be a unit.Quantity, got {type(temperature)} instead.\"\n )\n elif temperature is not None:\n if not temperature.unit.is_compatible(unit.kelvin):\n raise ValueError(\n f\"temperature must have units of temperature, got {temperature.unit} instead.\"\n )\n\n if volume is not None and not isinstance(volume, unit.Quantity):\n raise TypeError(\n f\"volume must be a unit.Quantity, got {type(volume)} instead.\"\n )\n elif volume is not None:\n if not volume.unit.is_compatible(unit.nanometer**3):\n raise ValueError(\n f\"volume must have units of distance**3, got {volume.unit} instead.\"\n )\n if pressure is not None and not isinstance(pressure, unit.Quantity):\n raise TypeError(\n f\"pressure must be a unit.Quantity, got {type(pressure)} instead.\"\n )\n elif pressure is not None:\n if not pressure.unit.is_compatible(unit.atmosphere):\n raise ValueError(\n f\"pressure must have units of pressure, got {pressure.unit} instead.\"\n )\n\n self.temperature = temperature\n if temperature is not None:\n self.beta = 1.0 / (unit.BOLTZMANN_CONSTANT_kB * (self.temperature))\n else:\n self.beta = None\n\n self.volume = volume\n self.pressure = pressure\n\n from .utils import get_nr_of_particles\n\n self.nr_of_particles = get_nr_of_particles(self.potential.topology)\n self._check_completness()\n\n def check_variables(self) -> None:\n \"\"\"\n Check if all necessary variables are set and log the simulation ensemble.\n \"\"\"\n variables = [\n \"temperature\",\n \"volume\",\n \"pressure\",\n ]\n set_variables = [var for var in variables if getattr(self, var) is not None]\n 
return set_variables\n\n def _check_completness(self):\n # check which variables are set\n set_variables = self.check_variables()\n\n if len(set_variables) == 0:\n log.info(\"No variables are set.\")\n\n # print all set variables\n for var in set_variables:\n log.info(f\"{var} is set.\")\n\n if self.temperature and self.volume and self.nr_of_particles:\n log.info(\"NVT ensemble simulated.\")\n if self.temperature and self.pressure and self.nr_of_particles:\n log.info(\"NpT ensemble is simulated.\")\n\n @classmethod\n def are_states_compatible(cls, state1, state2):\n \"\"\"\n Check if two simulation states are compatible.\n\n This method should define the criteria for compatibility,\n such as matching number of particles, etc.\n\n Parameters\n ----------\n state1 : SimulationState\n The first simulation state to compare.\n state2 : SimulationState\n The second simulation state to compare.\n\n Returns\n -------\n bool\n True if states are compatible, False otherwise.\n \"\"\"\n pass\n\n def get_reduced_potential(\n self, sampler_state: SamplerState, nbr_list=None\n ) -> float:\n \"\"\"\n Compute the reduced potential for the given sampler state.\n\n Parameters\n ----------\n sampler_state : SamplerState\n The sampler state for which to compute the reduced potential.\n nbr_list : NeighborList or PairList, optional\n The neighbor list or pair list routine to use for calculating the reduced potential.\n\n Returns\n -------\n float\n The reduced potential of the system.\n\n Notes\n -----\n The reduced potential is computed as:\n u = \\beta [U(x) + p V(x) + \\mu N(x)],\n where \\beta is the inverse temperature, p is the pressure,\n \\mu is the chemical potential, x are the atomic positions,\n U(x) is the potential energy, V(x) is the box volume,\n and N(x) is the number of particles.\n \"\"\"\n if self.beta is None:\n self.beta = 1.0 / (\n unit.BOLTZMANN_CONSTANT_kB * (self.temperature * unit.kelvin)\n )\n log.debug(f\"sample state: {sampler_state.x0}\")\n reduced_potential = (\n unit.Quantity(\n self.potential.compute_energy(sampler_state.x0, nbr_list),\n unit.kilojoule_per_mole,\n )\n ) / unit.AVOGADRO_CONSTANT_NA\n log.debug(f\"reduced potential: {reduced_potential}\")\n if self.pressure is not None:\n reduced_potential += self.pressure * self.volume\n\n return self.beta * reduced_potential\n\n def kT_to_kJ_per_mol(self, energy):\n energy = energy * unit.AVOGADRO_CONSTANT_NA\n return energy / self.beta" }, { "identifier": "NeighborListNsqrd", "path": "chiron/neighbors.py", "snippet": "class NeighborListNsqrd(PairsBase):\n \"\"\"\n N^2 neighborlist implementation that returns the particle pair ids, displacement vectors, and distances.\n\n Parameters\n ----------\n space: Space\n Class that defines how to calculate the displacement between two points and apply the boundary conditions\n cutoff: float, default = 2.5\n Cutoff distance for the neighborlist\n skin: float, default = 0.4\n Skin distance for the neighborlist\n n_max_neighbors: int, default=200\n Maximum number of neighbors for each particle. 
Used for padding arrays for efficient jax computations\n This will be checked and dynamically updated during the build stage\n Examples\n --------\n\n\n \"\"\"\n\n def __init__(\n self,\n space: Space,\n cutoff: unit.Quantity = unit.Quantity(1.2, unit.nanometer),\n skin: unit.Quantity = unit.Quantity(0.4, unit.nanometer),\n n_max_neighbors: float = 200,\n ):\n if not isinstance(space, Space):\n raise TypeError(f\"space must be of type Space, found {type(space)}\")\n if not cutoff.unit.is_compatible(unit.angstrom):\n raise ValueError(\n f\"cutoff must be a unit.Quantity with units of distance, cutoff.unit = {cutoff.unit}\"\n )\n if not skin.unit.is_compatible(unit.angstrom):\n raise ValueError(\n f\"cutoff must be a unit.Quantity with units of distance, skin.unit = {skin.unit}\"\n )\n\n self.cutoff = cutoff.value_in_unit_system(unit.md_unit_system)\n self.skin = skin.value_in_unit_system(unit.md_unit_system)\n self.cutoff_and_skin = self.cutoff + self.skin\n self.n_max_neighbors = n_max_neighbors\n self.space = space\n\n # set a a simple variable to know if this has at least been built once as opposed to just initialized\n # this does not imply that the neighborlist is up to date\n self.is_built = False\n\n # note, we need to use the partial decorator in order to use the jit decorate\n # so that it knows to ignore the `self` argument\n @partial(jax.jit, static_argnums=(0,))\n def _pairs_mask(self, particle_ids: jnp.array):\n \"\"\"\n Jitted function to generate mask that allows us to remove self-interactions and double-counting of pairs\n\n Parameters\n ----------\n particle_ids: jnp.array\n Array of particle ids\n\n Returns\n -------\n jnp.array\n Bool mask to remove self-interactions and double-counting of pairs\n\n \"\"\"\n # for the nsq approach, we consider the distance between a particle and all other particles in the system\n # if we used a cell list the possible_neighbors would be a smaller list, i.e., only those in the neigboring cells\n\n possible_neighbors = particle_ids\n\n particles_j = jnp.broadcast_to(\n possible_neighbors,\n (particle_ids.shape[0], possible_neighbors.shape[0]),\n )\n\n # reshape the particle_ids\n particles_i = jnp.reshape(particle_ids, (particle_ids.shape[0], 1))\n # create a mask to exclude self interactions and double counting\n temp_mask = particles_i < particles_j\n\n return temp_mask\n\n @partial(jax.jit, static_argnums=(0, 5))\n def _build_neighborlist(\n self, particle_i, reduction_mask, pid, coordinates, n_max_neighbors\n ):\n \"\"\"\n Jitted function to build the neighbor list for a single particle\n\n Parameters\n ----------\n particle_i: jnp.array\n X,Y,Z coordinates of particle i\n reduction_mask: jnp.array\n Mask to exclude self-interactions and double counting of pairs\n coordinates: jnp.array\n X,Y,Z coordinates of all particles\n n_max_neighbors: int\n Maximum number of neighbors for each particle. 
Used for padding arrays for efficient jax computations\n\n Returns\n -------\n neighbor_list_mask: jnp.array\n Mask to exclude padding from the neighbor list\n neighbor_list: jnp.array\n List of particle ids for the neighbors, padded to n_max_neighbors\n n_neighbors: int\n Number of neighbors for the particle\n \"\"\"\n\n # calculate the displacement between particle i and all other particles\n r_ij, dist = self.space.displacement(particle_i, coordinates)\n\n # neighbor_mask will be an array of length n_particles (i.e., length of coordinates)\n # where each element is True if the particle is a neighbor, False if it is not\n # subject to both the cutoff+skin and the reduction mask that eliminates double counting and self-interactions\n neighbor_mask = jnp.where(\n (dist < self.cutoff_and_skin) & (reduction_mask), True, False\n )\n # when we pad the neighbor list, we will use last particle id in the neighbor list\n # this choice was made such that when we use the neighbor list in the masked energy calculat\n # the padded values will result in reasonably well defined values\n fill_value = jnp.argmax(neighbor_mask)\n fill_value = jnp.where(fill_value == pid, fill_value + 1, fill_value)\n\n # count up the number of neighbors\n n_neighbors = jnp.where(neighbor_mask, 1, 0).sum()\n\n # since neighbor_mask indices have a one-to-one correspondence to particle ids,\n # applying jnp.where, will return an array of the indices that are neighbors.\n # since this needs to be uniformly sized, we can just fill this array up to the n_max_neighbors.\n neighbor_list = jnp.array(\n jnp.where(neighbor_mask, size=n_max_neighbors, fill_value=fill_value),\n dtype=jnp.uint32,\n )\n # we need to generate a new mask associatd with the padded neighbor list\n # to be able to quickly exclude the padded values from the neighbor list\n neighbor_list_mask = jnp.where(jnp.arange(n_max_neighbors) < n_neighbors, 1, 0)\n\n del r_ij, dist\n return neighbor_list_mask, neighbor_list, n_neighbors\n\n def build(\n self,\n coordinates: Union[jnp.array, unit.Quantity],\n box_vectors: Union[jnp.array, unit.Quantity],\n ):\n \"\"\"\n Build the neighborlist from an array of coordinates and box vectors.\n\n Parameters\n ----------\n coordinates: jnp.array\n Shape[N,3] array of particle coordinates\n box_vectors: jnp.array\n Shape[3,3] array of box vectors\n\n Returns\n -------\n None\n\n \"\"\"\n\n # set our reference coordinates\n # the call to x0 and box_vectors automatically convert these to jnp arrays in the correct unit system\n if isinstance(coordinates, unit.Quantity):\n if not coordinates.unit.is_compatible(unit.nanometer):\n raise ValueError(\n f\"Coordinates require distance units, not {coordinates.unit}\"\n )\n coordinates = coordinates.value_in_unit_system(unit.md_unit_system)\n\n if isinstance(box_vectors, unit.Quantity):\n if not box_vectors.unit.is_compatible(unit.nanometer):\n raise ValueError(\n f\"Box vectors require distance unit, not {box_vectors.unit}\"\n )\n box_vectors = box_vectors.value_in_unit_system(unit.md_unit_system)\n\n if box_vectors.shape != (3, 3):\n raise ValueError(\n f\"box_vectors should be a 3x3 array, shape provided: {box_vectors.shape}\"\n )\n\n self.ref_coordinates = coordinates\n self.box_vectors = box_vectors\n\n # the neighborlist assumes that the box vectors do not change between building and calculating the neighbor list\n # changes to the box vectors require rebuilding the neighbor list\n self.space.box_vectors = self.box_vectors\n\n # store the ids of all the particles\n self.particle_ids = 
jnp.array(\n range(0, self.ref_coordinates.shape[0]), dtype=jnp.uint32\n )\n\n # calculate which pairs to exclude\n reduction_mask = self._pairs_mask(self.particle_ids)\n\n # calculate the distance for all pairs this will return\n # neighbor_mask: an array of shape (n_particles, n_particles) where each element is the mask\n # to determine if the particle is a neighbor\n # neighbor_list: an array of shape (n_particles, n_max_neighbors) where each element is the particle id of the neighbor\n # this is padded with zeros to ensure a uniform size;\n # n_neighbors: an array of shape (n_particles) where each element is the number of neighbors for that particle\n\n self.neighbor_mask, self.neighbor_list, self.n_neighbors = jax.vmap(\n self._build_neighborlist, in_axes=(0, 0, 0, None, None)\n )(\n self.ref_coordinates,\n reduction_mask,\n self.particle_ids,\n self.ref_coordinates,\n self.n_max_neighbors,\n )\n\n self.neighbor_list = self.neighbor_list.reshape(-1, self.n_max_neighbors)\n\n while jnp.any(self.n_neighbors == self.n_max_neighbors).block_until_ready():\n log.debug(\n f\"Increasing n_max_neighbors from {self.n_max_neighbors} to at {jnp.max(self.n_neighbors)+10}\"\n )\n self.n_max_neighbors = int(jnp.max(self.n_neighbors) + 10)\n\n self.neighbor_mask, self.neighbor_list, self.n_neighbors = jax.vmap(\n self._build_neighborlist, in_axes=(0, 0, 0, None, None)\n )(\n self.ref_coordinates,\n reduction_mask,\n self.particle_ids,\n self.ref_coordinates,\n self.n_max_neighbors,\n )\n\n self.neighbor_list = self.neighbor_list.reshape(-1, self.n_max_neighbors)\n\n self.is_built = True\n\n @partial(jax.jit, static_argnums=(0,))\n def _calc_distance_per_particle(\n self, particle1, neighbors, neighbor_mask, coordinates\n ):\n \"\"\"\n Jitted function to calculate the distance between a particle and its neighbors\n\n Parameters\n ----------\n particle1: int\n Particle id\n neighbors: jnp.array\n Array of particle ids for the neighbors of particle1\n neighbor_mask: jnp.array\n Mask to exclude padding from the neighbor list of particle1\n coordinates: jnp.array\n X,Y,Z coordinates of all particles\n\n Returns\n -------\n n_pairs: int\n Number of interacting pairs for the particle\n mask: jnp.array\n Mask to exclude padding from the neighbor list of particle1.\n If a particle is within the interaction cutoff, the mask is 1, otherwise it is 0\n dist: jnp.array\n Array of distances between the particle and its neighbors\n r_ij: jnp.array\n Array of displacement vectors between the particle and its neighbors\n \"\"\"\n # repeat the particle id for each neighbor\n particles1 = jnp.repeat(particle1, neighbors.shape[0])\n\n # calculate the displacement between particle i and all neighbors\n r_ij, dist = self.space.displacement(\n coordinates[particles1], coordinates[neighbors]\n )\n # calculate the mask to determine if the particle is a neighbor\n # this will be done based on the interaction cutoff and using the neighbor_mask to exclude padding\n mask = jnp.where((dist < self.cutoff) & (neighbor_mask), 1, 0)\n\n # calculate the number of pairs\n n_pairs = mask.sum()\n\n return n_pairs, mask, dist, r_ij\n\n def calculate(self, coordinates: jnp.array):\n \"\"\"\n Calculate the neighbor list for the current state\n\n Parameters\n ----------\n coordinates: jnp.array\n Shape[N,3] array of particle coordinates\n\n Returns\n -------\n n_neighbors: jnp.array\n Array of number of neighbors for each particle\n neighbor_list: jnp.array\n Array of particle ids for the neighbors, padded to n_max_neighbors. 
Shape (n_particles, n_max_neighbors)\n padding_mask: jnp.array\n Array of masks to exclude padding from the neighbor list of each particle. Shape (n_particles, n_max_neighbors)\n dist: jnp.array\n Array of distances between each particle and its neighbors. Shape (n_particles, n_max_neighbors)\n r_ij: jnp.array\n Array of displacement vectors between each particle and its neighbors. Shape (n_particles, n_max_neighbors, 3)\n \"\"\"\n # coordinates = sampler_state.x0\n # note, we assume the box vectors do not change between building and calculating the neighbor list\n # changes to the box vectors require rebuilding the neighbor list\n\n n_neighbors, padding_mask, dist, r_ij = jax.vmap(\n self._calc_distance_per_particle, in_axes=(0, 0, 0, None)\n )(self.particle_ids, self.neighbor_list, self.neighbor_mask, coordinates)\n # mask = mask.reshape(-1, self.n_max_neighbors)\n return n_neighbors, self.neighbor_list, padding_mask, dist, r_ij\n\n @partial(jax.jit, static_argnums=(0,))\n def _calculate_particle_displacement(self, particle, coordinates, ref_coordinates):\n \"\"\"\n Calculate the displacement of a particle from the reference coordinates.\n If the displacement exceeds the half the skin distance, return True, otherwise return False.\n\n This function is designed to allow it to be jitted and vmapped over particle indices.\n\n Parameters\n ----------\n particle: int\n Particle id\n coordinates: jnp.array\n Array of particle coordinates\n ref_coordinates: jnp.array\n Array of reference particle coordinates\n\n Returns\n -------\n bool\n True if the particle is outside the skin distance, False if it is not.\n \"\"\"\n # calculate the displacement of a particle from the initial coordinates\n\n r_ij, displacement = self.space.displacement(\n coordinates[particle], ref_coordinates[particle]\n )\n\n status = jnp.where(displacement >= self.skin / 2.0, True, False)\n del displacement\n return status\n\n def check(self, coordinates: jnp.array) -> bool:\n \"\"\"\n Check if the neighbor list needs to be rebuilt based on displacement of the particles from the reference coordinates.\n If a particle moves more than 0.5 skin distance, the neighborlist will be rebuilt.\n Will also return True if the size of the coordinates array changes.\n\n Note, this could also accept a user defined criteria for distance, but this is not implemented yet.\n\n Parameters\n ----------\n coordinates: jnp.array\n Array of particle coordinates\n Returns\n -------\n bool\n True if the neighbor list needs to be rebuilt, False if it does not.\n \"\"\"\n\n if self.ref_coordinates.shape[0] != coordinates.shape[0]:\n return True\n\n status = jax.vmap(\n self._calculate_particle_displacement, in_axes=(0, None, None)\n )(self.particle_ids, coordinates, self.ref_coordinates)\n if jnp.any(status):\n del status\n return True\n else:\n del status\n return False" }, { "identifier": "OrthogonalPeriodicSpace", "path": "chiron/neighbors.py", "snippet": "class OrthogonalPeriodicSpace(Space):\n \"\"\"\n Defines the simulation space for an orthogonal periodic system.\n\n \"\"\"\n\n @property\n def box_vectors(self) -> jnp.array:\n return self._box_vectors\n\n @box_vectors.setter\n def box_vectors(self, box_vectors: jnp.array) -> None:\n self._box_vectors = box_vectors\n self._box_lengths = jnp.array(\n [box_vectors[0][0], box_vectors[1][1], box_vectors[2][2]]\n )\n\n @partial(jax.jit, static_argnums=(0,))\n def displacement(\n self, xyz_1: jnp.array, xyz_2: jnp.array\n ) -> Tuple[jnp.array, jnp.array]:\n \"\"\"\n Calculate the periodic distance 
between two points.\n\n Parameters\n ----------\n xyz_1: jnp.array\n Coordinates of the first point\n xyz_2: jnp.array\n Coordinates of the second point\n\n Returns\n -------\n r_ij: jnp.array\n Displacement vector between the two points\n dist: float\n Distance between the two points\n\n \"\"\"\n # calculate uncorrect r_ij\n r_ij = xyz_1 - xyz_2\n\n # calculated corrected displacement vector\n r_ij = (\n jnp.mod(r_ij + self._box_lengths * 0.5, self._box_lengths)\n - self._box_lengths * 0.5\n )\n # calculate the scalar distance\n dist = jnp.linalg.norm(r_ij, axis=-1)\n\n return r_ij, dist\n\n @partial(jax.jit, static_argnums=(0,))\n def wrap(self, xyz: jnp.array) -> jnp.array:\n \"\"\"\n Wrap the coordinates of the system.\n\n Parameters\n ----------\n xyz: jnp.array\n Coordinates of the system\n\n Returns\n -------\n jnp.array\n Wrapped coordinates of the system\n\n \"\"\"\n xyz = xyz - jnp.floor(xyz / self._box_lengths) * self._box_lengths\n\n return xyz" }, { "identifier": "SimulationReporter", "path": "chiron/reporters.py", "snippet": "class SimulationReporter:\n def __init__(self, filename: str, topology: Topology, buffer_size: int = 1):\n \"\"\"\n Initialize the SimulationReporter.\n\n Parameters\n ----------\n filename : str\n Name of the HDF5 file to write the simulation data.\n topology: openmm.Topology\n buffer_size : int, optional\n Number of data points to buffer before writing to disk (default is 1).\n\n \"\"\"\n import mdtraj as md\n\n self.filename = filename\n self.buffer_size = buffer_size\n self.topology = topology\n self.buffer = {}\n self.h5file = h5py.File(filename, \"a\")\n log.info(f\"Writing simulation data to {filename}\")\n\n def get_available_keys(self):\n return self.h5file.keys()\n\n def report(self, data_dict):\n \"\"\"\n Add new data to the buffer and write the buffer to disk if it's full.\n\n Parameters\n ----------\n data_dict : dict\n Dictionary containing data to report. 
Keys are data labels (e.g., 'energy'),\n and values are the data points (usually numpy arrays).\n\n \"\"\"\n for key, value in data_dict.items():\n if key not in self.buffer:\n self.buffer[key] = []\n self.buffer[key].append(value)\n\n if len(self.buffer[key]) >= self.buffer_size:\n self._write_to_disk(key)\n\n def _write_to_disk(self, key):\n \"\"\"\n Write buffered data of a given key to the HDF5 file.\n\n Parameters\n ----------\n key : str\n The key of the data to write to disk.\n\n \"\"\"\n data = np.array(self.buffer[key])\n if key in self.h5file:\n dset = self.h5file[key]\n dset.resize((dset.shape[0] + data.shape[0],) + data.shape[1:])\n dset[-data.shape[0] :] = data\n else:\n log.debug(f\"Creating {key} in {self.filename}\")\n self.h5file.create_dataset(\n key, data=data, maxshape=(None,) + data.shape[1:], chunks=True\n )\n\n self.buffer[key] = []\n\n def close(self):\n \"\"\"\n Write any remaining data in the buffer to disk and close the HDF5 file.\n\n \"\"\"\n for key in self.buffer:\n if self.buffer[key]:\n self._write_to_disk(key)\n self.h5file.close()\n\n def get_property(self, name: str):\n \"\"\"\n Get the property from the HDF5 file.\n\n Parameters\n ----------\n name : str\n Name of the property to get.\n\n Returns\n -------\n np.ndarray\n The property.\n\n \"\"\"\n if name not in self.h5file:\n log.debug(f\"{name} not in HDF5 file\")\n return None\n else:\n return np.array(self.h5file[name])\n\n def get_mdtraj_trajectory(self):\n import mdtraj as md\n\n return md.Trajectory(\n xyz=self.get_property(\"traj\"),\n topology=md.Topology.from_openmm(self.topology),\n unitcell_lengths=self.get_property(\"box_vectors\"),\n unitcell_angles=self.get_property(\"box_angles\"),\n )" }, { "identifier": "LangevinIntegrator", "path": "chiron/integrators.py", "snippet": "class LangevinIntegrator:\n \"\"\"\n Langevin dynamics integrator for molecular dynamics simulation using the BAOAB splitting scheme [1].\n\n References:\n [1] Benedict Leimkuhler, Charles Matthews;\n Robust and efficient configurational molecular sampling via Langevin dynamics.\n J. Chem. Phys. 7 May 2013; 138 (17): 174102. https://doi.org/10.1063/1.4802990\n\n\n \"\"\"\n\n def __init__(\n self,\n stepsize=1.0 * unit.femtoseconds,\n collision_rate=1.0 / unit.picoseconds,\n save_frequency: int = 100,\n reporter: Optional[SimulationReporter] = None,\n ) -> None:\n \"\"\"\n Initialize the LangevinIntegrator object.\n\n Parameters\n ----------\n stepsize : unit.Quantity, optional\n Time step of integration with units of time. Default is 1.0 * unit.femtoseconds.\n collision_rate : unit.Quantity, optional\n Collision rate for the Langevin dynamics, with units 1/time. Default is 1.0 / unit.picoseconds.\n save_frequency : int, optional\n Frequency of saving the simulation data. Default is 100.\n reporter : SimulationReporter, optional\n Reporter object for saving the simulation data. 
Default is None.\n \"\"\"\n\n self.kB = unit.BOLTZMANN_CONSTANT_kB * unit.AVOGADRO_CONSTANT_NA\n log.info(f\"stepsize = {stepsize}\")\n log.info(f\"collision_rate = {collision_rate}\")\n log.info(f\"save_frequency = {save_frequency}\")\n\n self.stepsize = stepsize\n self.collision_rate = collision_rate\n if reporter is not None:\n log.info(f\"Using reporter {reporter} saving to {reporter.filename}\")\n self.reporter = reporter\n self.save_frequency = save_frequency\n\n self.velocities = None\n def set_velocities(self, vel: unit.Quantity) -> None:\n \"\"\"\n Set the initial velocities for the Langevin Integrator.\n\n Parameters\n ----------\n vel : unit.Quantity\n Velocities to be set for the integrator.\n \"\"\"\n self.velocities = vel\n\n def run(\n self,\n sampler_state: SamplerState,\n thermodynamic_state: ThermodynamicState,\n n_steps: int = 5_000,\n key=random.PRNGKey(0),\n nbr_list=None,\n progress_bar=False,\n ):\n \"\"\"\n Run the integrator to perform Langevin dynamics molecular dynamics simulation.\n\n Parameters\n ----------\n sampler_state : SamplerState\n The initial state of the simulation, including positions.\n thermodynamic_state : ThermodynamicState\n The thermodynamic state of the system, including temperature and potential.\n n_steps : int, optional\n Number of simulation steps to perform.\n key : jax.random.PRNGKey, optional\n Random key for generating random numbers.\n nbr_list : NeighborListNsqrd, optional\n Neighbor list for the system.\n progress_bar : bool, optional\n Flag indicating whether to display a progress bar during integration.\n\n \"\"\"\n from .utils import get_list_of_mass\n\n potential = thermodynamic_state.potential\n\n mass = get_list_of_mass(potential.topology)\n\n self.box_vectors = sampler_state.box_vectors\n self.progress_bar = progress_bar\n temperature = thermodynamic_state.temperature\n x0 = sampler_state.x0\n\n log.info(\"Running Langevin dynamics\")\n log.info(f\"n_steps = {n_steps}\")\n log.info(f\"temperature = {temperature}\")\n log.info(f\"Using seed: {key}\")\n\n kbT_unitless = (self.kB * temperature).value_in_unit_system(unit.md_unit_system)\n mass_unitless = jnp.array(mass.value_in_unit_system(unit.md_unit_system))[\n :, None\n ]\n sigma_v = jnp.sqrt(kbT_unitless / mass_unitless)\n stepsize_unitless = self.stepsize.value_in_unit_system(unit.md_unit_system)\n collision_rate_unitless = self.collision_rate.value_in_unit_system(\n unit.md_unit_system\n )\n\n # Initialize velocities\n if self.velocities is None:\n v0 = sigma_v * random.normal(key, x0.shape)\n else:\n v0 = self.velocities.value_in_unit_system(unit.md_unit_system)\n # Convert to dimensionless quantities\n a = jnp.exp((-collision_rate_unitless * stepsize_unitless))\n b = jnp.sqrt(1 - jnp.exp(-2 * collision_rate_unitless * stepsize_unitless))\n\n x = x0\n v = v0\n if nbr_list is not None:\n nbr_list.build_from_state(sampler_state)\n\n F = potential.compute_force(x, nbr_list)\n for step in tqdm(range(n_steps)) if self.progress_bar else range(n_steps):\n key, subkey = random.split(key)\n # v\n v += (stepsize_unitless * 0.5) * F / mass_unitless\n # r\n x += (stepsize_unitless * 0.5) * v\n\n if nbr_list is not None:\n x = nbr_list.space.wrap(x)\n # check if we need to rebuild the neighborlist after moving the particles\n if nbr_list.check(x):\n nbr_list.build(x, self.box_vectors)\n # o\n random_noise_v = random.normal(subkey, x.shape)\n v = (a * v) + (b * sigma_v * random_noise_v)\n\n x += (stepsize_unitless * 0.5) * v\n if nbr_list is not None:\n x = nbr_list.space.wrap(x)\n # 
check if we need to rebuild the neighborlist after moving the particles\n if nbr_list.check(x):\n nbr_list.build(x, self.box_vectors)\n\n F = potential.compute_force(x, nbr_list)\n # v\n v += (stepsize_unitless * 0.5) * F / mass_unitless\n\n if step % self.save_frequency == 0:\n # log.debug(f\"Saving at step {step}\")\n if self.reporter is not None:\n d = {\n \"traj\": x,\n \"energy\": potential.compute_energy(x, nbr_list),\n \"step\": step,\n }\n if nbr_list is not None:\n d[\"box_vectors\"] = nbr_list.space.box_vectors\n\n # log.debug(d)\n self.reporter.report(d)\n\n log.debug(\"Finished running Langevin dynamics\")\n # self.reporter.close()" } ]
from openmmtools.testsystems import LennardJonesFluid from chiron.potential import LJPotential from openmm import unit from chiron.states import SamplerState, ThermodynamicState from chiron.neighbors import NeighborListNsqrd, OrthogonalPeriodicSpace from chiron.reporters import SimulationReporter from chiron.integrators import LangevinIntegrator import os import h5py import matplotlib.pyplot as plt
11,204
# Use the LennardJonesFluid example from openmmtools to initialize particle positions and topology # For this example, the topology provides the masses for the particles # The default LennardJonesFluid example considers the system to be Argon with 39.9 amu lj_fluid = LennardJonesFluid(reduced_density=0.1, nparticles=1000) # initialize the LennardJones potential in chiron # sigma = 0.34 * unit.nanometer epsilon = 0.238 * unit.kilocalories_per_mole cutoff = 3.0 * sigma lj_potential = LJPotential( lj_fluid.topology, sigma=sigma, epsilon=epsilon, cutoff=cutoff ) # define the sampler state
# Use the LennardJonesFluid example from openmmtools to initialize particle positions and topology # For this example, the topology provides the masses for the particles # The default LennardJonesFluid example considers the system to be Argon with 39.9 amu lj_fluid = LennardJonesFluid(reduced_density=0.1, nparticles=1000) # initialize the LennardJones potential in chiron # sigma = 0.34 * unit.nanometer epsilon = 0.238 * unit.kilocalories_per_mole cutoff = 3.0 * sigma lj_potential = LJPotential( lj_fluid.topology, sigma=sigma, epsilon=epsilon, cutoff=cutoff ) # define the sampler state
sampler_state = SamplerState(
1
2023-11-07 18:17:43+00:00
16k
WolfgangFahl/dcm
dcm/dcm_webserver.py
[ { "identifier": "Assessment", "path": "dcm/dcm_assessment.py", "snippet": "class Assessment:\n \"\"\"\n Assessment for CompetenceTree\n \"\"\"\n\n def __init__(\n self,\n webserver: NiceGuiWebserver,\n dcm: DynamicCompetenceMap,\n learner: Learner,\n debug: bool = False,\n ):\n \"\"\"\n initialize the assessment\n\n Args:\n webserver(NiceguiWebServer): the webserver context\n dcm(DynamicCompetenceMap): the competence map\n learner(Learner): the learner to get the self assessment for\n debug(bool): if True show debugging information\n \"\"\"\n self.webserver = webserver\n self.debug = debug\n self.reset(dcm=dcm, learner=learner)\n self.setup_ui()\n\n def reset(\n self,\n dcm: DynamicCompetenceMap,\n learner: Learner,\n ):\n \"\"\"\n (re)set the assessment\n\n Args:\n webserver(NiceguiWebServer): the webserver context\n dcm(DynamicCompetenceMap): the competence map\n learner(Learner): the learner to get the self assessment for\n \"\"\"\n self.dcm = dcm\n self.competence_tree = dcm.competence_tree\n self.learner = learner\n self.achievement_index = 0\n # do we need setup the achievements?\n if self.learner.achievements is None:\n self.learner.achievements = []\n self.setup_achievements()\n self.total = len(self.learner.achievements)\n\n def clear(self):\n \"\"\"\n clear the ui\n \"\"\"\n self.container.clear()\n\n @property\n def current_achievement(self) -> Achievement:\n if self.achievement_index < 0 or self.achievement_index > len(\n self.learner.achievements\n ):\n raise ValueError(f\"invalid achievement index {self.achievement_index}\")\n achievement = self.learner.achievements[self.achievement_index]\n return achievement\n\n def setup_achievements(self):\n \"\"\"\n Setup achievements based on the competence tree.\n\n This method iterates over the competence aspects and their facets,\n constructs a path for each facet, and creates an Achievement instance\n based on the path. 
These achievements are then added to the learner's\n achievements list.\n \"\"\"\n for aspect in self.competence_tree.aspects:\n for area in aspect.areas:\n area_path: str = f\"{self.competence_tree.id}/{aspect.id}\"\n self.add_achievement(area_path)\n for facet in area.facets:\n # Construct the path for the facet\n facet_path=f\"{area_path}/{facet.id}\"\n self.add_achievement(facet_path)\n \n def add_achievement(self,path):\n # Create a new Achievement instance with the constructed path\n new_achievement = Achievement(\n path=path,\n )\n self.learner.add_achievement(new_achievement)\n\n def get_index_str(self) -> str:\n index_str = f\"{self.achievement_index+1:2}/{self.total:2}\"\n return index_str\n\n def setup_ui(self):\n \"\"\"\n display my competence Tree elements\n \"\"\"\n with ui.grid(columns=1).classes(\"w-full\") as self.container:\n self.progress_bar = NiceguiProgressbar(\n total=self.total, desc=\"self assessment\", unit=\"facets\"\n )\n self.progress_bar.reset()\n with ui.row():\n ui.button(\"\", icon=\"arrow_back\", on_click=lambda _args: self.step(-1))\n ui.button(\"\", icon=\"arrow_forward\", on_click=lambda _args: self.step(1))\n with ui.row():\n with ui.card() as self.achievement_view:\n self.index_view = ui.label(self.get_index_str())\n self.link_view = ui.html()\n self.markdown_view = ui.markdown()\n self.button_row = ButtonRow(\n self, self.competence_tree, self.current_achievement\n )\n\n def show_progress(self):\n \"\"\"\n Update the progress bar based on the\n number of achievements with a non-None level value.\n \"\"\"\n count = sum(\n 1\n for achievement in self.learner.achievements\n if achievement.level is not None\n )\n self.progress_bar.total = self.total\n self.progress_bar.update_value(count)\n\n async def step(self, step: int = 0):\n self.update_achievement_view(step)\n\n def update_achievement_view(self, step: int = 0):\n \"\"\"\n display the active achievement as the step indicates\n \"\"\"\n self.show_progress()\n self.webserver.render_dcm(self.dcm, self.learner, clear_assessment=False)\n if self.achievement_index + step < 0:\n ui.notify(\"first achievement reached!\")\n step = 0\n if self.achievement_index + step < len(self.learner.achievements):\n self.achievement_index += step\n self.index_view.text = self.get_index_str()\n achievement = self.current_achievement\n self.button_row.achievement = achievement\n self.button_row.set_button_states(achievement)\n competence_element = self.competence_tree.lookup_by_path(achievement.path)\n if not competence_element:\n ui.notify(\"invalid path: {achievement.path}\")\n self.markdown_view.content = f\"⚠️ {achievement.path}\"\n else:\n if hasattr(competence_element, \"path\"):\n if competence_element.url:\n link = Link.create(\n competence_element.url, competence_element.path\n )\n else:\n link = competence_element.path\n else:\n link = \"⚠️ - competence element path missing\"\n self.link_view.content = link\n description = competence_element.description or \"\"\n if isinstance(competence_element, CompetenceArea):\n aspect = competence_element.aspect\n description = f\"### {aspect.name}\\n\\n**{competence_element.name}**:\\n\\n{description}\"\n if isinstance(competence_element, CompetenceFacet):\n area = competence_element.area\n description = f\"### {area.name}\\n\\n**{competence_element.name}**:\\n\\n{description}\"\n self.markdown_view.content = description\n else:\n ui.notify(\"Done!\")" }, { "identifier": "DcmChart", "path": "dcm/dcm_chart.py", "snippet": "class DcmChart:\n \"\"\"\n a Dynamic competence map 
chart\n \"\"\"\n\n def __init__(self, dcm: DynamicCompetenceMap):\n \"\"\"\n Constructor\n \"\"\"\n self.dcm = dcm\n\n def generate_svg(\n self,\n filename: Optional[str] = None,\n learner: Optional[Learner] = None,\n config: Optional[SVGConfig] = None,\n ) -> str:\n \"\"\"\n Generate the SVG markup and optionally save it to a file. If a filename is given, the method\n will also save the SVG to that file. The SVG is generated based on internal state not shown here.\n\n Args:\n filename (str, optional): The path to the file where the SVG should be saved. Defaults to None.\n learner(Learner): the learner to show the achievements for\n config (SVGConfig, optional): The configuration for the SVG canvas and legend. Defaults to default values.\n\n Returns:\n str: The SVG markup.\n \"\"\"\n if config is None:\n config = SVGConfig() # Use default configuration if none provided\n svg_markup = self.generate_svg_markup(\n self.dcm.competence_tree, learner=learner, config=config\n )\n if filename:\n self.save_svg_to_file(svg_markup, filename)\n return svg_markup\n\n def generate_donut_segment_for_element(\n self,\n svg: SVG,\n element: CompetenceElement,\n learner: Learner,\n segment: DonutSegment,\n ):\n \"\"\"\n generate a donut segment for a given element of\n the CompetenceTree\n \"\"\"\n # Add the element segment as a donut segment\n element_url = (\n element.url\n if element.url\n else f\"{self.lookup_url}/description/{element.path}\"\n if self.lookup_url is not None\n else None\n )\n show_as_popup = element.url is None\n element_config = element.to_svg_node_config(\n url=element_url,\n show_as_popup=show_as_popup,\n x=self.cx,\n y=self.cy,\n )\n # check learner achievements\n if learner:\n achievement = learner.achievements_by_path.get(element.path, None)\n if achievement and achievement.level:\n element_config.element_class = \"selected\"\n svg.add_donut_segment(config=element_config, segment=segment)\n\n def generate_pie_elements(\n self,\n level: int,\n svg: SVG,\n parent_element: CompetenceElement,\n learner: Learner,\n segment: DonutSegment,\n ):\n \"\"\"\n generate the pie elements (donut segments) for the subelements\n of the given parent_element at the given level\n e.g. aspects, areas or facets - taking the learner\n achievements into account if a corresponding achievement\n is found. 
The segment limits the area in which the generation may operate\n \"\"\"\n sub_element_name = self.levels[level]\n # get the elements to be displayed\n elements = getattr(parent_element, sub_element_name)\n total = len(elements)\n # are there any elements to be shown?\n if total > 0:\n angle_per_element = (segment.end_angle - segment.start_angle) / total\n start_angle = segment.start_angle\n for element in elements:\n end_angle = start_angle + angle_per_element\n sub_segment = DonutSegment(\n segment.outer_radius,\n segment.outer_radius + self.tree_radius*2,\n start_angle,\n end_angle,\n )\n self.generate_donut_segment_for_element(\n svg, element, learner, segment=sub_segment\n )\n start_angle = end_angle\n if level + 1 < len(self.levels):\n self.generate_pie_elements(\n level=level + 1,\n svg=svg,\n parent_element=element,\n learner=learner,\n segment=sub_segment,\n )\n\n def generate_svg_markup(\n self,\n competence_tree: CompetenceTree = None,\n learner: Learner = None,\n config: SVGConfig = None,\n with_java_script: bool = True,\n lookup_url: str = \"\",\n ) -> str:\n \"\"\"\n generate the SVG markup for the given CompetenceTree and learner\n\n Args:\n\n \"\"\"\n if competence_tree is None:\n competence_tree = self.dcm.competence_tree\n\n svg = SVG(config)\n self.svg = svg\n config = svg.config\n # center of circle\n self.cx = config.width // 2\n self.cy = (config.total_height - config.legend_height) // 2\n self.levels = [\"aspects\", \"areas\", \"facets\"]\n self.tree_radius = config.width / 2 / 8\n\n self.lookup_url = (\n competence_tree.lookup_url if competence_tree.lookup_url else lookup_url\n )\n\n circle_config = competence_tree.to_svg_node_config(\n x=self.cx, \n y=self.cy, \n width=self.tree_radius\n )\n svg.add_circle(config=circle_config)\n\n segment = DonutSegment(\n inner_radius=0, \n outer_radius=self.tree_radius\n )\n self.generate_pie_elements(\n level=0,\n svg=svg,\n parent_element=competence_tree,\n learner=learner,\n segment=segment,\n )\n if config.legend_height > 0:\n competence_tree.add_legend(svg)\n\n return svg.get_svg_markup(with_java_script=with_java_script)\n\n def save_svg_to_file(self, svg_markup: str, filename: str):\n \"\"\"\n Save the SVG content to a file\n \"\"\"\n with open(filename, \"w\") as file:\n file.write(svg_markup)" }, { "identifier": "CompetenceTree", "path": "dcm/dcm_core.py", "snippet": "class CompetenceTree(CompetenceElement, YamlAble[\"CompetenceTree\"]):\n \"\"\"\n Represents the entire structure of competencies, including various aspects and levels.\n\n Attributes:\n competence_aspects (List[CompetenceAspect]): A list of CompetenceAspect objects.\n competence_levels (List[CompetenceLevel]): A list of CompetenceLevel objects representing the different levels in the competence hierarchy.\n element_names (Dict[str, str]): A dictionary holding the names for tree, aspects, facets, and levels. 
The key is the type (\"tree\", \"aspect\", \"facet\", \"level\").\n \"\"\"\n\n lookup_url: Optional[str] = None\n aspects: List[CompetenceAspect] = field(default_factory=list)\n levels: List[CompetenceLevel] = field(default_factory=list)\n element_names: Dict[str, str] = field(default_factory=dict)\n\n def __post_init__(self):\n \"\"\"\n initalize the path variables of my hierarchy\n \"\"\"\n super().__post_init__()\n self.path = self.id\n # Loop through each competence aspect and set their paths and parent references\n for aspect in self.aspects:\n aspect.competence_tree = self\n aspect.path = f\"{self.id}/{aspect.id}\"\n for area in aspect.areas:\n area.competence_tree = self\n area.aspect = aspect\n area.path = f\"{self.id}/{aspect.id}/{area.id}\"\n for facet in area.facets:\n facet.competence_tree = self\n facet.area = area\n facet.path = f\"{self.id}/{aspect.id}/{area.id}/{facet.id}\"\n\n @classmethod\n def required_keys(cls) -> Tuple:\n keys = {\"name\", \"id\", \"url\", \"description\", \"element_names\"}\n return keys\n\n def lookup_by_path(\n self, path: str, lenient: bool = True\n ) -> Optional[CompetenceElement]:\n \"\"\"\n Look up and return a competence element (tree,aspect of facet)\n based on the given path.\n\n The path is expected to be in the format \"tree_id/aspect_id/facet_id\".\n This method parses the path and retrieves the corresponding competence aspect or facet.\n\n Args:\n path (str): The path in the format \"tree_id/aspect_id/facet_id\".\n\n lenient(bool): if not lenient raise Exceptions for invalid paths and ids\n Returns:\n Optional[CompetenceElement]: The competence aspect or facet corresponding to the given path.\n \"\"\"\n\n def handle_error(msg):\n if not lenient:\n raise ValueError(msg)\n\n parts = path.split(\"/\")\n if len(parts) < 1:\n return None\n\n tree_id = parts[0]\n if tree_id != self.id:\n handle_error(f\"invalid tree_id for lookup {tree_id}\")\n return None\n if len(parts) == 1:\n return self\n if len(parts) > 1:\n aspect_id = parts[1]\n # Retrieve the aspect\n aspect = next((aspect for aspect in self.aspects if aspect.id==aspect_id), None)\n if aspect:\n if len(parts) == 2:\n return aspect\n if len(parts) > 2:\n area_id = parts[2]\n area = next((area for area in aspect.areas if area.id == area_id), None)\n if area:\n if len(parts) == 3:\n return area\n if len(parts) > 3:\n facet_id = parts[3]\n facet = next(\n (facet for facet in area.facets if facet.id == facet_id), None\n )\n if facet:\n return facet\n handle_error(f\"invalid path for lookup {path}\")\n return None\n\n def to_pretty_json(self):\n \"\"\"\n Converts the CompetenceTree object to a pretty JSON string, handling null values.\n \"\"\"\n json_str = self.to_json()\n json_dict = json.loads(json_str)\n\n def remove_none_values(data):\n \"\"\"\n Recursively removes keys with None values from a dictionary, list, or nested structure.\n \"\"\"\n if isinstance(data, dict):\n return {\n k: remove_none_values(v) for k, v in data.items() if v is not None\n }\n elif isinstance(data, list):\n return [remove_none_values(item) for item in data]\n return data\n\n none_free_dict = remove_none_values(json_dict)\n null_free_json_str = json.dumps(none_free_dict, indent=2)\n return null_free_json_str\n\n def add_legend(self, svg: SVG) -> None:\n \"\"\"\n Add a legend to the SVG explaining the color codes for levels and aspects.\n Args:\n svg (SVG): The SVG object to which the legend will be added.\n \"\"\"\n # Starting x position for the legends, starting 10 pixels from the left edge\n x_start = 10\n # y 
position for the legends, starting 20 pixels from the bottom edge\n y = svg.config.total_height - svg.config.legend_height + 20\n # Width and height of each legend color box\n box_width, box_height = 30, 20\n # Padding between legend items and between the color box and the text\n padding = 5\n\n # Add the competence level legend\n level_items = [(level.color_code, level.name) for level in self.levels]\n svg.add_legend_column(\n level_items,\n self.element_names.get(\"level\", \"Level\"),\n x_start,\n y,\n box_width,\n box_height,\n )\n\n # Calculate the x position for the aspect legend based on the width of the level legend\n x_aspect_start = (\n x_start\n + box_width\n + padding\n + max(svg.get_text_width(level.name) for level in self.levels)\n + padding\n )\n\n # Add the competence aspect legend\n aspect_items = [(aspect.color_code, aspect.name) for aspect in self.aspects]\n svg.add_legend_column(\n aspect_items,\n self.element_names.get(\"aspect\", \"Aspect\"),\n x_aspect_start,\n y,\n box_width,\n box_height,\n )" }, { "identifier": "DynamicCompetenceMap", "path": "dcm/dcm_core.py", "snippet": "class DynamicCompetenceMap:\n \"\"\"\n a visualization of a competence map\n \"\"\"\n\n def __init__(self, competence_tree: CompetenceTree):\n \"\"\"\n constructor\n \"\"\"\n self.competence_tree = competence_tree\n self.svg = None\n\n @property\n def main_id(self):\n main_id = self.competence_tree.id\n return main_id\n\n @classmethod\n def examples_path(cls) -> str:\n # the root directory (default: examples)\n path = os.path.join(os.path.dirname(__file__), \"../dcm_examples\")\n path = os.path.abspath(path)\n return path\n\n @classmethod\n def get_example_dcm_definitions(\n cls,\n markup: str = \"json\",\n required_keys: Optional[Tuple] = None,\n as_text: bool = True,\n ) -> dict:\n \"\"\"\n Retrieve example Dynamic Competence Map (DCM) definitions from files in the specified markup format (either JSON or YAML).\n\n Args:\n markup (str): The markup format of the input files. Defaults to 'json'. Supported values are 'json' and 'yaml'.\n required_keys (Optional[Tuple]): A tuple of keys required to validate the data. If not provided, all keys will be considered valid.\n as_text (bool): If True, returns the file content as text; if False, returns parsed data. 
Defaults to True.\n\n Returns:\n dict: A dictionary where each key is the prefix of the file name and the value is the file content as text or parsed data, depending on the value of 'as_text'.\n\n Raises:\n Exception: If there's an error in reading or parsing the file, or if the file does not meet the required validation criteria.\n \"\"\"\n example_dcm_defs = {}\n file_ext = f\".{markup}\"\n examples_path = cls.examples_path()\n for dirpath, _dirnames, filenames in os.walk(examples_path):\n for filename in filenames:\n if filename.endswith(file_ext):\n filepath = os.path.join(dirpath, filename)\n with open(filepath, \"r\") as definition_file:\n file_prefix = filename.replace(file_ext, \"\")\n definition_text = definition_file.read()\n try:\n definition_data = cls.parse_markup(definition_text, markup)\n if cls.is_valid_definition(definition_data, required_keys):\n if as_text:\n example_dcm_defs[file_prefix] = definition_text\n else:\n example_dcm_defs[file_prefix] = definition_data\n except Exception as ex:\n cls.handle_markup_issue(\n filename, definition_text, ex, markup\n )\n return example_dcm_defs\n\n @classmethod\n def parse_markup(cls, text: str, markup: str) -> Union[dict, list]:\n \"\"\"\n Parse the given text as JSON or YAML based on the specified markup type.\n\n Args:\n text (str): The string content to be parsed.\n markup (str): The type of markup to use for parsing. Supported values are 'json' and 'yaml'.\n\n Returns:\n Union[dict, list]: The parsed data, which can be either a dictionary or a list, depending on the content.\n\n Raises:\n ValueError: If an unsupported markup format is specified.\n \"\"\"\n if markup == \"json\":\n data=json.loads(text)\n return data\n elif markup == \"yaml\":\n data=yaml.safe_load(text)\n return data\n else:\n raise ValueError(f\"Unsupported markup format: {markup}\")\n\n @classmethod\n def handle_markup_issue(cls, name: str, definition_string: str, ex, markup: str):\n if isinstance(ex, JSONDecodeError):\n lines = definition_string.splitlines() # Split the string into lines\n err_line = lines[ex.lineno - 1] # JSONDecodeError gives 1-based lineno\n pointer = (\n \" \" * (ex.colno - 1) + \"^\"\n ) # Create a pointer string to indicate the error position\n error_message = (\n f\"{name}:JSON parsing error on line {ex.lineno} column {ex.colno}:\\n\"\n f\"{err_line}\\n\"\n f\"{pointer}\\n\"\n f\"{ex.msg}\"\n )\n raise ValueError(error_message) # Raise a new exception with this message\n else:\n error_message = f\"error in {name}: {str(ex)}\"\n raise ValueError(error_message)\n\n @classmethod\n def is_valid_definition(cls, definition_data, required_keys: Tuple):\n return all(key in definition_data for key in required_keys)\n\n @classmethod\n def get_examples(cls, content_class=CompetenceTree, markup: str = \"json\") -> dict:\n examples = {}\n for name, definition_string in cls.get_example_dcm_definitions(\n required_keys=content_class.required_keys(), markup=markup\n ).items():\n example = cls.from_definition_string(\n name, definition_string, content_class, markup=markup\n )\n # check the type of the example\n example_id = example.main_id\n examples[example_id] = example\n return examples\n\n @classmethod\n def from_definition_string(\n cls, name: str, definition_string: str, content_class, markup: str = \"json\"\n ) -> Any:\n \"\"\"\n Load a DynamicCompetenceMap or Learner instance from a definition string (either JSON or YAML).\n\n Args:\n name (str): A name identifier for the data source.\n definition_string (str): The string content of the 
definition.\n content_class (dataclass_json): The class which will be instantiated with the parsed data.\n markup (str): The markup format of the data. Defaults to 'json'. Supported values are 'json' and 'yaml'.\n\n Returns:\n DynamicCompetenceMap: An instance of DynamicCompetenceMap loaded with the parsed data.\n\n Raises:\n ValueError: If there's an error in parsing the data.\n \"\"\"\n try:\n data = cls.parse_markup(definition_string, markup)\n content = content_class.from_dict(data)\n if isinstance(content, CompetenceTree):\n return DynamicCompetenceMap(content)\n else:\n return content\n except Exception as ex:\n cls.handle_markup_issue(name, definition_string, ex, markup)" }, { "identifier": "Learner", "path": "dcm/dcm_core.py", "snippet": "class Learner:\n \"\"\"\n A learner with achievements.\n Attributes:\n learner_id (str): Identifier for the learner.\n achievements (Dict[str, List[Achievement]]):\n A dictionary where each key is a competence element identifier\n and the value is a list of Achievement instances for that tree.\n \"\"\"\n\n learner_id: str\n achievements: Optional[List[Achievement]] = field(default=None)\n\n def __post_init__(self):\n self.achievements_by_path = {}\n if self.achievements:\n for achievement in self.achievements:\n self.achievements_by_path[achievement.path] = achievement\n\n @classmethod\n def required_keys(cls):\n keys = {\"achievements\"}\n return keys\n\n @property\n def main_id(self):\n main_id = self.learner_id\n return main_id\n\n def add_achievement(self, new_achievement):\n self.achievements.append(new_achievement)\n self.achievements_by_path[new_achievement.path] = new_achievement\n\n def get_competence_tree_ids(self) -> List[str]:\n \"\"\"\n Get all unique competence tree IDs of my achievements.\n\n Returns:\n List[str]: A list of unique competence tree IDs.\n \"\"\"\n # Assuming that the learner's achievements are stored in a list called self.achievements\n # You can modify this part according to your actual data structure.\n\n # Create a set to store unique competence tree IDs\n unique_tree_ids = set()\n\n # Iterate through the learner's achievements\n for achievement in self.achievements:\n # Assuming each achievement has a tree_id attribute\n tree_id = achievement.tree_id\n\n # Add the tree_id to the set\n unique_tree_ids.add(tree_id)\n\n # Convert the set to a list and return\n return list(unique_tree_ids)" }, { "identifier": "SVG", "path": "dcm/svg.py", "snippet": "class SVG:\n \"\"\"\n Class for creating SVG drawings.\n\n Attributes:\n config (SVGConfig): Configuration for the SVG drawing.\n \"\"\"\n\n def __init__(self, config: SVGConfig = None):\n \"\"\"\n Initialize SVG object with given configuration.\n\n Args:\n config (SVGConfig): Configuration for SVG generation.\n \"\"\"\n self.config = config if config else SVGConfig()\n self.width = self.config.width\n self.height = self.config.height\n self.elements = []\n self.indent = self.config.indent\n\n def get_svg_style(self) -> str:\n \"\"\"\n Define styles for SVG elements.\n\n Returns:\n str: String containing style definitions for SVG.\n \"\"\"\n return (\n f\"{self.indent}<style>\\n\"\n f\"{self.indent * 2}.hoverable {{ cursor: pointer; fill-opacity: 1; stroke: black; stroke-width: 0.5; }}\\n\"\n f\"{self.indent * 2}.hoverable:hover {{ fill-opacity: 0.7; }}\\n\"\n f\"{self.indent * 2}.selected {{ fill-opacity: 0.5; stroke: blue; stroke-width: 1.5;}}\\n\"\n f\"{self.indent * 2}.popup {{\\n\"\n f\"{self.indent * 3}border: 2px solid black;\\n\"\n f\"{self.indent * 
3}border-radius: 15px;\\n\"\n f\"{self.indent * 3}overflow: auto;\\n\" # changed to 'auto' to allow scrolling only if needed\n f\"{self.indent * 3}background: white;\\n\"\n f\"{self.indent * 3}box-sizing: border-box;\\n\" # ensures padding and border are included\n f\"{self.indent * 3}padding: 10px;\\n\" # optional padding inside the popup\n f\"{self.indent * 3}height: 100%;\\n\" # adjusts height relative to foreignObject\n f\"{self.indent * 3}width: 100%;\\n\" # adjusts width relative to foreignObject\n f\"{self.indent * 2}}}\\n\"\n f\"{self.indent * 2}.close-btn {{\\n\" # style for the close button\n f\"{self.indent * 3}cursor: pointer;\\n\"\n f\"{self.indent * 3}position: absolute;\\n\"\n f\"{self.indent * 3}top: 0;\\n\"\n f\"{self.indent * 3}right: 0;\\n\"\n f\"{self.indent * 3}padding: 5px;\\n\"\n f\"{self.indent * 3}font-size: 20px;\\n\"\n f\"{self.indent * 3}user-select: none;\\n\" # prevents text selection on click\n f\"{self.indent * 2}}}\\n\"\n f\"{self.indent}</style>\\n\"\n )\n\n def get_text_width(self, text: str) -> int:\n \"\"\"\n Estimate the width of a text string in the SVG based on the font size and font name.\n\n Args:\n text (str): The text content.\n\n Returns:\n int: The estimated width of the text in pixels.\n \"\"\"\n average_char_width_factor = 0.6\n average_char_width = average_char_width_factor * self.config.font_size\n return int(average_char_width * len(text))\n\n def add_element(self, element: str, level: int = 1, comment: str = None):\n \"\"\"\n Add an SVG element to the elements list with proper indentation.\n\n Args:\n element (str): SVG element to be added.\n level (int): Indentation level for the element.\n comment(str): optional comment to add\n \"\"\"\n base_indent = f\"{self.indent * level}\"\n if comment:\n indented_comment = f\"{base_indent}<!-- {comment} -->\\n\"\n self.elements.append(indented_comment)\n indented_element = f\"{base_indent}{element}\\n\"\n self.elements.append(indented_element)\n\n def add_circle(self, config: SVGNodeConfig):\n \"\"\"\n Add a circle element to the SVG, optionally making it clickable and with a hover effect.\n\n Args:\n config (SVGNodeConfig): Configuration for the circle element.\n \"\"\"\n color = config.fill if config.fill else self.config.default_color\n circle_element = f'<circle cx=\"{config.x}\" cy=\"{config.y}\" r=\"{config.width}\" fill=\"{color}\" class=\"{config.element_class}\" />'\n\n # If URL is provided, wrap the circle in an anchor tag to make it clickable\n if config.url:\n circle_indent = self.indent * (config.indent_level + 1)\n circle_element = f\"\"\"<a xlink:href=\"{config.url}\" target=\"_blank\">\n{circle_indent}{circle_element}\n</a>\"\"\"\n\n # Use add_group to add the circle element with proper indentation\n self.add_group(\n circle_element,\n group_id=config.id,\n group_class=config.element_class,\n level=config.indent_level,\n comment=config.comment,\n )\n\n def add_rectangle(\n self,\n x: int,\n y: int,\n width: int,\n height: int,\n fill: str = None,\n indent_level: int = 1,\n ):\n \"\"\"\n Add a rectangle element to the SVG.\n\n Args:\n x (int): X-coordinate of the rectangle's top-left corner.\n y (int): Y-coordinate of the rectangle's top-left corner.\n width (int): Width of the rectangle.\n height (int): Height of the rectangle.\n fill (str, optional): Fill color of the rectangle. 
Defaults to the default color.\n indent_level (int): Indentation level for the rectangle.\n \"\"\"\n color = fill if fill else self.config.default_color\n rect = f'{self.indent * 3}<rect x=\"{x}\" y=\"{y}\" width=\"{width}\" height=\"{height}\" fill=\"{color}\" />\\n'\n self.add_element(rect)\n\n def add_legend_column(\n self,\n items: List[Tuple[str, str]],\n title: str,\n x: int,\n y: int,\n width: int,\n height: int,\n ) -> None:\n \"\"\"\n Add a legend column to the SVG.\n\n Args:\n items (List[Tuple[str, str]]): List of tuples with color code and label.\n title (str): Title of the legend.\n x (int): X position of the legend.\n y (int): Y position of the legend.\n width (int): Width of the color box in the legend.\n height (int): Height of each legend item.\n \"\"\"\n self.add_text(x, y - height, title, font_weight=\"bold\")\n for index, (color, label) in enumerate(items):\n self.add_rectangle(x, y + index * (height + 5), width, height, color)\n self.add_text(x + width + 10, y + index * (height + 5) + height / 2, label)\n\n def add_text(\n self,\n x: int,\n y: int,\n text: str,\n fill: str = \"black\",\n font_weight: str = \"normal\",\n text_anchor: str = \"start\",\n ) -> None:\n \"\"\"\n Add text to the SVG.\n\n Args:\n x (int): X position of the text.\n y (int): Y position of the text.\n text (str): Text content.\n fill (str, optional): Fill color of the text. Defaults to \"black\".\n font_weight (str, optional): Font weight (normal, bold, etc.). Defaults to \"normal\".\n text_anchor (str, optional): Text alignment (start, middle, end). Defaults to \"start\".\n \"\"\"\n escaped_text = html.escape(text)\n text_element = (\n f'<text x=\"{x}\" y=\"{y}\" fill=\"{fill}\" '\n f'font-family=\"{self.config.font}\" '\n f'font-size=\"{self.config.font_size}\" '\n f'font-weight=\"{font_weight}\" '\n f'text-anchor=\"{text_anchor}\">'\n f\"{escaped_text}</text>\\n\"\n )\n self.add_element(text_element)\n\n def add_group(\n self,\n content: str,\n group_id: str = None,\n group_class: str = None,\n level: int = 1,\n comment: str = None,\n ):\n \"\"\"\n Add a group of elements to the SVG.\n\n Args:\n content (str): SVG content to be grouped.\n group_id (str, optional): ID for the group.\n group_class (str, optional): Class for the group.\n level (int): Indentation level for the group.\n \"\"\"\n group_attrs = []\n if group_id:\n group_attrs.append(f'id=\"{group_id}\"')\n if group_class:\n group_attrs.append(f'class=\"{group_class}\"')\n attrs_str = \" \".join(group_attrs)\n indented_content = \"\\n\".join(\n f\"{self.indent * (level + 1)}{line}\" for line in content.strip().split(\"\\n\")\n )\n group_str = f\"{self.indent * level}<g {attrs_str}>\\n{indented_content}\\n{self.indent * level}</g>\\n\"\n self.add_element(group_str, level=level, comment=comment)\n\n def add_pie_segment(\n self,\n cx: int,\n cy: int,\n radius: int,\n start_angle_deg: float,\n end_angle_deg: float,\n color: str,\n segment_name: str,\n segment_id: str = None,\n segment_class: str = None,\n segment_url: str = None,\n ) -> None:\n \"\"\"\n Add a pie segment to the SVG.\n\n Args:\n cx (int): X-coordinate of the center of the pie.\n cy (int): Y-coordinate of the center of the pie.\n radius (int): Radius of the pie.\n start_angle_deg (float): Start angle of the segment in degrees.\n end_angle_deg (float): End angle of the segment in degrees.\n color (str): Fill color of the segment.\n segment_name (str): Name of the segment, used for the tooltip.\n segment_id (str, optional): ID for the segment group. 
Defaults to None.\n segment_class (str, optional): Class for the segment group. Defaults to None.\n segment_url (str, optional): URL linked to the segment. Defaults to None.\n\n Returns:\n None\n \"\"\"\n if color is None:\n color = self.config.default_color\n # Convert angles from degrees to radians for calculations\n start_angle_rad = radians(start_angle_deg)\n end_angle_rad = radians(end_angle_deg)\n\n # Calculate the start and end points\n start_x = cx + radius * cos(start_angle_rad)\n start_y = cy + radius * sin(start_angle_rad)\n end_x = cx + radius * cos(end_angle_rad)\n end_y = cy + radius * sin(end_angle_rad)\n\n # Determine if the arc should be drawn as a large-arc (values >= 180 degrees)\n large_arc_flag = \"1\" if end_angle_deg - start_angle_deg >= 180 else \"0\"\n\n # Create the path for the pie segment without indentation\n path_str = (\n f\"M {cx} {cy} \"\n f\"L {start_x} {start_y} \"\n f\"A {radius} {radius} 0 {large_arc_flag} 1 {end_x} {end_y} \"\n \"Z\"\n )\n\n # Assemble the path and title elements\n path_element = f'<path d=\"{path_str}\" fill=\"{color}\" />\\n'\n escaped_title = html.escape(segment_name) # Escape special characters\n\n title_element = f\"<title>{escaped_title}</title>\"\n\n # Combine path and title into one string without adding indentation here\n group_content = f\"{path_element}{title_element}\"\n\n # If an URL is provided, wrap the content within an anchor\n if segment_url:\n group_content = (\n f'<a xlink:href=\"{segment_url}\" target=\"_blank\">\\n{group_content}</a>\\n'\n )\n\n # Use add_group to add the pie segment with proper indentation\n self.add_group(\n group_content, group_id=segment_id, group_class=segment_class, level=2\n )\n\n def add_donut_segment(\n self,\n config: SVGNodeConfig,\n segment: DonutSegment,\n ) -> None:\n \"\"\"\n Add a donut segment to the SVG.\n\n Args:\n config (SVGNodeConfig): Configuration for the donut segment.\n start_angle_deg (float): Start angle of the segment in degrees.\n end_angle_deg (float): End angle of the segment in degrees.\n \"\"\"\n cx, cy = config.x, config.y\n color = config.fill if config.fill else self.config.default_color\n\n if color is None:\n color = self.config.default_color\n # Convert angles from degrees to radians for calculations\n start_angle_rad = radians(segment.start_angle)\n end_angle_rad = radians(segment.end_angle)\n\n # Calculate the start and end points for the outer radius\n start_x_outer = cx + segment.outer_radius * cos(start_angle_rad)\n start_y_outer = cy + segment.outer_radius * sin(start_angle_rad)\n end_x_outer = cx + segment.outer_radius * cos(end_angle_rad)\n end_y_outer = cy + segment.outer_radius * sin(end_angle_rad)\n\n # Calculate the start and end points for the inner radius\n start_x_inner = cx + segment.inner_radius * cos(start_angle_rad)\n start_y_inner = cy + segment.inner_radius * sin(start_angle_rad)\n end_x_inner = cx + segment.inner_radius * cos(end_angle_rad)\n end_y_inner = cy + segment.inner_radius * sin(end_angle_rad)\n\n # Determine if the arc should be drawn as a large-arc (values >= 180 degrees)\n large_arc_flag = \"1\" if segment.end_angle - segment.start_angle >= 180 else \"0\"\n\n # Create the path for the pie segment without indentation\n path_str = (\n f\"M {start_x_inner} {start_y_inner} \" # Move to start of inner arc\n f\"L {start_x_outer} {start_y_outer} \" # Line to start of outer arc\n f\"A {segment.outer_radius} {segment.outer_radius} 0 {large_arc_flag} 1 {end_x_outer} {end_y_outer} \" # Outer arc\n f\"L {end_x_inner} {end_y_inner} \" # 
Line to end of inner arc\n f\"A {segment.inner_radius} {segment.inner_radius} 0 {large_arc_flag} 0 {start_x_inner} {start_y_inner} \" # Inner arc (reverse)\n \"Z\"\n )\n\n # Assemble the path and title elements\n path_element = f'<path d=\"{path_str}\" fill=\"{color}\" />\\n'\n escaped_title = html.escape(config.title) # Escape special characters\n\n title_element = f\"<title>{escaped_title}</title>\"\n\n # Combine path and title into one string without adding indentation here\n group_content = f\"{path_element}{title_element}\"\n\n # Check if the segment should be shown as a popup\n if config.show_as_popup:\n # Add JavaScript to handle popup logic\n onclick_action = f\"onclick=\\\"showPopup('{config.url}', evt,this)\\\"\"\n group_content = f\"<g {onclick_action}>{group_content}</g>\"\n elif config.url:\n # Regular link behavior\n group_content = (\n f'<a xlink:href=\"{config.url}\" target=\"_blank\">{group_content}</a>'\n )\n\n # Use add_group to add the pie segment with proper indentation\n self.add_group(\n group_content,\n group_id=config.id,\n group_class=config.element_class,\n level=2,\n comment=config.comment,\n )\n\n def get_java_script(self) -> str:\n \"\"\"\n get the java script code for interactive behavior\n \"\"\"\n popup_script = \"\"\"\n <script>\n function showPopup(url, evt,element) {\n // show a Popup fetching html content from the given url\n // for the given element\n // Handle the selection of the popup element\n selectPopupElement(element);\n var popup = document.getElementById('dcm-svg-popup');\n var iframe = document.getElementById('popup-iframe');\n var svgRect = evt.target.getBoundingClientRect();\n var svg = document.querySelector('svg');\n var svgPoint = svg.createSVGPoint();\n svgPoint.x = evt.clientX - svgRect.left;\n svgPoint.y = evt.clientY - svgRect.top;\n \n // Position the popup near the click event\n popup.setAttribute('x', svgPoint.x);\n popup.setAttribute('y', svgPoint.y);\n // Set the iframe src and make the popup visible\n iframe.setAttribute('src', url);\n popup.setAttribute('visibility', 'visible');\n }\n \n function selectPopupElement(element) {\n var popup = document.getElementById('dcm-svg-popup');\n \n // Deselect the current element if there is one\n if (popup.currentElement) {\n popup.currentElement.classList.remove('selected');\n }\n \n // Select the new element\n if (element) {\n element.classList.add('selected');\n popup.currentElement = element; // Update the reference to the currently selected element\n } else {\n popup.currentElement = null; // Clear the reference if no element is passed\n }\n }\n \n function closePopup() {\n var popup = document.getElementById('dcm-svg-popup');\n popup.setAttribute('visibility', 'hidden');\n // Deselect the element when the popup is closed\n selectPopupElement(null);\n }\n </script>\n \"\"\"\n return popup_script\n\n def get_svg_markup(self, with_java_script: bool = True) -> str:\n \"\"\"\n Generate the complete SVG markup.\n\n Args:\n with_java_script(bool): if True(default) the javascript code is included otherwise\n it's available via the get_java_script function\n\n Returns:\n str: String containing the complete SVG markup.\n \"\"\"\n # Get current date and time\n now = datetime.now()\n formatted_now = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n header = (\n f\"<!-- generated by dcm https://github.com/WolfgangFahl/dcm at {formatted_now} -->\\n\"\n f'<svg xmlns=\"http://www.w3.org/2000/svg\" '\n f'xmlns:xlink=\"http://www.w3.org/1999/xlink\" '\n f'width=\"{self.width}\" 
height=\"{self.config.total_height}\">\\n'\n )\n popup = \"\"\"\n <!-- Add a foreignObject for the popup -->\n<foreignObject id=\"dcm-svg-popup\" class=\"popup\" width=\"500\" height=\"354\" x=\"150\" y=\"260\" visibility=\"hidden\">\n <body xmlns=\"http://www.w3.org/1999/xhtml\">\n <!-- Content of your popup goes here -->\n <div class=\"popup\" style=\"background-color: white; border: 1px solid black; padding: 10px; box-sizing: border-box; width: 500px; height: 354px; position: relative;\">\n <span onclick=\"closePopup()\" class=\"close-btn\">ⓧ</span>\n <iframe id=\"popup-iframe\" width=\"100%\" height=\"100%\" frameborder=\"0\"></iframe>\n </div>\n </body>\n</foreignObject>\n\"\"\"\n\n styles = self.get_svg_style()\n body = \"\".join(self.elements)\n footer = \"</svg>\"\n java_script = self.get_java_script() if with_java_script else \"\"\n svg_markup = f\"{header}{java_script}{styles}{body}{popup}{footer}\"\n return svg_markup\n\n def save(self, filename: str):\n \"\"\"\n Save the SVG markup to a file.\n\n Args:\n filename (str): Filename to save the SVG markup.\n \"\"\"\n with open(filename, \"w\") as file:\n file.write(self.get_svg_markup())" }, { "identifier": "SVGConfig", "path": "dcm/svg.py", "snippet": "class SVGConfig:\n \"\"\"\n Configuration class for SVG generation.\n\n Attributes:\n width (int): Width of the SVG canvas in pixels.\n height (int): Height of the SVG canvas in pixels.\n legend_height (int): Height reserved for the legend in pixels.\n font (str): Font family for text elements.\n font_size (int): Font size in points for text elements.\n indent (str): Indentation string, default is two spaces.\n default_color (str): Default color code for SVG elements.\n \"\"\"\n\n width: int = 600\n height: int = 600\n legend_height: int = 150\n font: str = \"Arial\"\n font_size: int = 12\n indent: str = \" \"\n default_color: str = \"#C0C0C0\"\n\n @property\n def total_height(self) -> int:\n \"\"\"\n Calculate total height of the SVG canvas including the legend.\n\n Returns:\n int: Total height of the SVG canvas.\n \"\"\"\n return self.height + self.legend_height" }, { "identifier": "Version", "path": "dcm/version.py", "snippet": "class Version:\n \"\"\"\n Version handling for nicepdf\n \"\"\"\n\n name = \"dcm\"\n version = dcm.__version__\n date = \"2023-11-06\"\n updated = \"2024-01-15\"\n description = \"python based visualization of dynamic competence maps\"\n\n authors = \"Wolfgang Fahl\"\n\n doc_url = \"https://wiki.bitplan.com/index.php/dcm\"\n chat_url = \"https://github.com/WolfgangFahl/dcm/discussions\"\n cm_url = \"https://github.com/WolfgangFahl/dcm\"\n\n license = f\"\"\"Copyright 2023 contributors. All rights reserved.\n\n Licensed under the Apache License 2.0\n http://www.apache.org/licenses/LICENSE-2.0\n\n Distributed on an \"AS IS\" basis without warranties\n or conditions of any kind, either express or implied.\"\"\"\n\n longDescription = f\"\"\"{name} version {version}\n{description}\n\n Created by {authors} on {date} last updated {updated}\"\"\"" } ]
import os from typing import Optional from urllib.parse import urlparse from fastapi import HTTPException from fastapi.responses import HTMLResponse from ngwidgets.file_selector import FileSelector from ngwidgets.input_webserver import InputWebserver from ngwidgets.webserver import WebserverConfig from nicegui import Client, app, ui from pydantic import BaseModel from dcm.dcm_assessment import Assessment from dcm.dcm_chart import DcmChart from dcm.dcm_core import CompetenceTree, DynamicCompetenceMap, Learner from dcm.svg import SVG, SVGConfig from dcm.version import Version
12,069
""" Created on 2023-11-06 @author: wf """ class SVGRenderRequest(BaseModel): """ A request for rendering an SVG. Attributes: name (str): The name of the render request. definition (str): The string representation of the data to be rendered, in either JSON or YAML format. markup (str): The format of the definition ('json' or 'yaml'). config (SVGConfig): Optional configuration for SVG rendering. Defaults to None, which uses default settings. """ name: str definition: str markup: str config: Optional[SVGConfig] = None class DynamicCompentenceMapWebServer(InputWebserver): """ server to supply Dynamic Competence Map Visualizations """ @classmethod def get_config(cls) -> WebserverConfig: """ get the configuration for this Webserver """ copy_right = "(c)2023-2024 Wolfgang Fahl" config = WebserverConfig(
""" Created on 2023-11-06 @author: wf """ class SVGRenderRequest(BaseModel): """ A request for rendering an SVG. Attributes: name (str): The name of the render request. definition (str): The string representation of the data to be rendered, in either JSON or YAML format. markup (str): The format of the definition ('json' or 'yaml'). config (SVGConfig): Optional configuration for SVG rendering. Defaults to None, which uses default settings. """ name: str definition: str markup: str config: Optional[SVGConfig] = None class DynamicCompentenceMapWebServer(InputWebserver): """ server to supply Dynamic Competence Map Visualizations """ @classmethod def get_config(cls) -> WebserverConfig: """ get the configuration for this Webserver """ copy_right = "(c)2023-2024 Wolfgang Fahl" config = WebserverConfig(
copy_right=copy_right, version=Version(), default_port=8885
7
2023-11-06 09:24:24+00:00
16k
Harvard-Ophthalmology-AI-Lab/FairSeg
SAMed/segment_anything/automatic_mask_generator.py
[ { "identifier": "Sam", "path": "SAMed/segment_anything/modeling/sam.py", "snippet": "class Sam(nn.Module):\n mask_threshold: float = 0.0\n image_format: str = \"RGB\"\n\n def __init__(\n self,\n image_encoder: ImageEncoderViT,\n prompt_encoder: PromptEncoder,\n mask_decoder: MaskDecoder,\n pixel_mean: List[float] = [123.675, 116.28, 103.53],\n pixel_std: List[float] = [58.395, 57.12, 57.375],\n ) -> None:\n \"\"\"\n SAM predicts object masks from an image and input prompts.\n\n Arguments:\n image_encoder (ImageEncoderViT): The backbone used to encode the\n image into image embeddings that allow for efficient mask prediction.\n prompt_encoder (PromptEncoder): Encodes various types of input prompts.\n mask_decoder (MaskDecoder): Predicts masks from the image embeddings\n and encoded prompts.\n pixel_mean (list(float)): Mean values for normalizing pixels in the input image.\n pixel_std (list(float)): Std values for normalizing pixels in the input image.\n \"\"\"\n super().__init__()\n self.image_encoder = image_encoder\n self.prompt_encoder = prompt_encoder\n self.mask_decoder = mask_decoder\n self.register_buffer(\"pixel_mean\", torch.Tensor(pixel_mean).view(-1, 1, 1), False)\n self.register_buffer(\"pixel_std\", torch.Tensor(pixel_std).view(-1, 1, 1), False)\n\n @property\n def device(self) -> Any:\n return self.pixel_mean.device\n\n def forward(self, batched_input, multimask_output, image_size):\n if isinstance(batched_input, list):\n outputs = self.forward_test(batched_input, multimask_output)\n else:\n outputs = self.forward_train(batched_input, multimask_output, image_size)\n return outputs\n\n def forward_train(self, batched_input, multimask_output, image_size):\n input_images = self.preprocess(batched_input)\n image_embeddings = self.image_encoder(input_images)\n sparse_embeddings, dense_embeddings = self.prompt_encoder(\n points=None, boxes=None, masks=None\n )\n low_res_masks, iou_predictions = self.mask_decoder(\n image_embeddings=image_embeddings,\n image_pe=self.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output\n )\n masks = self.postprocess_masks(\n low_res_masks,\n input_size=(image_size, image_size),\n original_size=(image_size, image_size)\n )\n outputs = {\n 'masks': masks,\n 'iou_predictions': iou_predictions,\n 'low_res_logits': low_res_masks\n }\n return outputs\n\n @torch.no_grad()\n def forward_test(\n self,\n batched_input: List[Dict[str, Any]],\n multimask_output: bool,\n ) -> List[Dict[str, torch.Tensor]]:\n \"\"\"\n Predicts masks end-to-end from provided images and prompts.\n If prompts are not known in advance, using SamPredictor is\n recommended over calling the model directly.\n\n Arguments:\n batched_input (list(dict)): A list over input images, each a\n dictionary with the following keys. A prompt key can be\n excluded if it is not present.\n 'image': The image as a torch tensor in 3xHxW format,\n already transformed for input to the model.\n 'original_size': (tuple(int, int)) The original size of\n the image before transformation, as (H, W).\n 'point_coords': (torch.Tensor) Batched point prompts for\n this image, with shape BxNx2. 
Already transformed to the\n input frame of the model.\n 'point_labels': (torch.Tensor) Batched labels for point prompts,\n with shape BxN.\n 'boxes': (torch.Tensor) Batched box inputs, with shape Bx4.\n Already transformed to the input frame of the model.\n 'mask_inputs': (torch.Tensor) Batched mask inputs to the model,\n in the form Bx1xHxW.\n multimask_output (bool): Whether the model should predict multiple\n disambiguating masks, or return a single mask.\n\n Returns:\n (list(dict)): A list over input images, where each element is\n as dictionary with the following keys.\n 'masks': (torch.Tensor) Batched binary mask predictions,\n with shape BxCxHxW, where B is the number of input promts,\n C is determiend by multimask_output, and (H, W) is the\n original size of the image.\n 'iou_predictions': (torch.Tensor) The model's predictions\n of mask quality, in shape BxC.\n 'low_res_logits': (torch.Tensor) Low resolution logits with\n shape BxCxHxW, where H=W=256. Can be passed as mask input\n to subsequent iterations of prediction.\n \"\"\"\n input_images = torch.stack([self.preprocess(x[\"image\"]) for x in batched_input], dim=0)\n image_embeddings = self.image_encoder(input_images)\n\n outputs = []\n for image_record, curr_embedding in zip(batched_input, image_embeddings):\n if \"point_coords\" in image_record:\n points = (image_record[\"point_coords\"], image_record[\"point_labels\"])\n else:\n points = None\n sparse_embeddings, dense_embeddings = self.prompt_encoder(\n points=points,\n boxes=image_record.get(\"boxes\", None),\n masks=image_record.get(\"mask_inputs\", None),\n )\n low_res_masks, iou_predictions = self.mask_decoder(\n image_embeddings=curr_embedding.unsqueeze(0),\n image_pe=self.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output,\n )\n masks = self.postprocess_masks(\n low_res_masks,\n input_size=image_record[\"image\"].shape[-2:],\n original_size=image_record[\"original_size\"],\n )\n masks = masks > self.mask_threshold\n outputs.append(\n {\n \"masks\": masks,\n \"iou_predictions\": iou_predictions,\n \"low_res_logits\": low_res_masks,\n }\n )\n return outputs\n\n def postprocess_masks(\n self,\n masks: torch.Tensor,\n input_size: Tuple[int, ...],\n original_size: Tuple[int, ...],\n ) -> torch.Tensor:\n \"\"\"\n Remove padding and upscale masks to the original image size.\n\n Arguments:\n masks (torch.Tensor): Batched masks from the mask_decoder,\n in BxCxHxW format.\n input_size (tuple(int, int)): The size of the image input to the\n model, in (H, W) format. 
Used to remove padding.\n original_size (tuple(int, int)): The original size of the image\n before resizing for input to the model, in (H, W) format.\n\n Returns:\n (torch.Tensor): Batched masks in BxCxHxW format, where (H, W)\n is given by original_size.\n \"\"\"\n masks = F.interpolate(\n masks,\n (self.image_encoder.img_size, self.image_encoder.img_size),\n mode=\"bilinear\",\n align_corners=False,\n )\n masks = masks[..., : input_size[0], : input_size[1]]\n masks = F.interpolate(masks, original_size, mode=\"bilinear\", align_corners=False)\n return masks\n\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n\n # Pad\n h, w = x.shape[-2:]\n padh = self.image_encoder.img_size - h\n padw = self.image_encoder.img_size - w\n x = F.pad(x, (0, padw, 0, padh))\n return x" }, { "identifier": "SamPredictor", "path": "SAMed/segment_anything/predictor.py", "snippet": "class SamPredictor:\n def __init__(\n self,\n sam_model: Sam,\n ) -> None:\n \"\"\"\n Uses SAM to calculate the image embedding for an image, and then\n allow repeated, efficient mask prediction given prompts.\n\n Arguments:\n sam_model (Sam): The model to use for mask prediction.\n \"\"\"\n super().__init__()\n self.model = sam_model\n self.transform = ResizeLongestSide(sam_model.image_encoder.img_size)\n self.reset_image()\n\n def set_image(\n self,\n image: np.ndarray,\n image_format: str = \"RGB\",\n ) -> None:\n \"\"\"\n Calculates the image embeddings for the provided image, allowing\n masks to be predicted with the 'predict' method.\n\n Arguments:\n image (np.ndarray): The image for calculating masks. Expects an\n image in HWC uint8 format, with pixel values in [0, 255].\n image_format (str): The color format of the image, in ['RGB', 'BGR'].\n \"\"\"\n assert image_format in [\n \"RGB\",\n \"BGR\",\n ], f\"image_format must be in ['RGB', 'BGR'], is {image_format}.\"\n if image_format != self.model.image_format:\n image = image[..., ::-1]\n\n # Transform the image to the form expected by the model\n input_image = self.transform.apply_image(image)\n input_image_torch = torch.as_tensor(input_image, device=self.device)\n input_image_torch = input_image_torch.permute(2, 0, 1).contiguous()[None, :, :, :]\n\n self.set_torch_image(input_image_torch, image.shape[:2])\n\n @torch.no_grad()\n def set_torch_image(\n self,\n transformed_image: torch.Tensor,\n original_image_size: Tuple[int, ...],\n ) -> None:\n \"\"\"\n Calculates the image embeddings for the provided image, allowing\n masks to be predicted with the 'predict' method. 
Expects the input\n image to be already transformed to the format expected by the model.\n\n Arguments:\n transformed_image (torch.Tensor): The input image, with shape\n 1x3xHxW, which has been transformed with ResizeLongestSide.\n original_image_size (tuple(int, int)): The size of the image\n before transformation, in (H, W) format.\n \"\"\"\n assert (\n len(transformed_image.shape) == 4\n and transformed_image.shape[1] == 3\n and max(*transformed_image.shape[2:]) == self.model.image_encoder.img_size\n ), f\"set_torch_image input must be BCHW with long side {self.model.image_encoder.img_size}.\"\n self.reset_image()\n\n self.original_size = original_image_size\n self.input_size = tuple(transformed_image.shape[-2:])\n input_image = self.model.preprocess(transformed_image)\n self.features = self.model.image_encoder(input_image)\n self.is_image_set = True\n\n def predict(\n self,\n point_coords: Optional[np.ndarray] = None,\n point_labels: Optional[np.ndarray] = None,\n box: Optional[np.ndarray] = None,\n mask_input: Optional[np.ndarray] = None,\n multimask_output: bool = True,\n return_logits: bool = False,\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"\n Predict masks for the given input prompts, using the currently set image.\n\n Arguments:\n point_coords (np.ndarray or None): A Nx2 array of point prompts to the\n model. Each point is in (X,Y) in pixels.\n point_labels (np.ndarray or None): A length N array of labels for the\n point prompts. 1 indicates a foreground point and 0 indicates a\n background point.\n box (np.ndarray or None): A length 4 array given a box prompt to the\n model, in XYXY format.\n mask_input (np.ndarray): A low resolution mask input to the model, typically\n coming from a previous prediction iteration. Has form 1xHxW, where\n for SAM, H=W=256.\n multimask_output (bool): If true, the model will return three masks.\n For ambiguous input prompts (such as a single click), this will often\n produce better masks than a single prediction. If only a single\n mask is needed, the model's predicted quality score can be used\n to select the best mask. For non-ambiguous prompts, such as multiple\n input prompts, multimask_output=False can give better results.\n return_logits (bool): If true, returns un-thresholded masks logits\n instead of a binary mask.\n\n Returns:\n (np.ndarray): The output masks in CxHxW format, where C is the\n number of masks, and (H, W) is the original image size.\n (np.ndarray): An array of length C containing the model's\n predictions for the quality of each mask.\n (np.ndarray): An array of shape CxHxW, where C is the number\n of masks and H=W=256. These low resolution logits can be passed to\n a subsequent iteration as mask input.\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\"An image must be set with .set_image(...) 
before mask prediction.\")\n\n # Transform input prompts\n coords_torch, labels_torch, box_torch, mask_input_torch = None, None, None, None\n if point_coords is not None:\n assert (\n point_labels is not None\n ), \"point_labels must be supplied if point_coords is supplied.\"\n point_coords = self.transform.apply_coords(point_coords, self.original_size)\n coords_torch = torch.as_tensor(point_coords, dtype=torch.float, device=self.device)\n labels_torch = torch.as_tensor(point_labels, dtype=torch.int, device=self.device)\n coords_torch, labels_torch = coords_torch[None, :, :], labels_torch[None, :]\n if box is not None:\n box = self.transform.apply_boxes(box, self.original_size)\n box_torch = torch.as_tensor(box, dtype=torch.float, device=self.device)\n box_torch = box_torch[None, :]\n if mask_input is not None:\n mask_input_torch = torch.as_tensor(mask_input, dtype=torch.float, device=self.device)\n mask_input_torch = mask_input_torch[None, :, :, :]\n\n masks, iou_predictions, low_res_masks = self.predict_torch(\n coords_torch,\n labels_torch,\n box_torch,\n mask_input_torch,\n multimask_output,\n return_logits=return_logits,\n )\n\n masks = masks[0].detach().cpu().numpy()\n iou_predictions = iou_predictions[0].detach().cpu().numpy()\n low_res_masks = low_res_masks[0].detach().cpu().numpy()\n return masks, iou_predictions, low_res_masks\n\n @torch.no_grad()\n def predict_torch(\n self,\n point_coords: Optional[torch.Tensor],\n point_labels: Optional[torch.Tensor],\n boxes: Optional[torch.Tensor] = None,\n mask_input: Optional[torch.Tensor] = None,\n multimask_output: bool = True,\n return_logits: bool = False,\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"\n Predict masks for the given input prompts, using the currently set image.\n Input prompts are batched torch tensors and are expected to already be\n transformed to the input frame using ResizeLongestSide.\n\n Arguments:\n point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the\n model. Each point is in (X,Y) in pixels.\n point_labels (torch.Tensor or None): A BxN array of labels for the\n point prompts. 1 indicates a foreground point and 0 indicates a\n background point.\n box (np.ndarray or None): A Bx4 array given a box prompt to the\n model, in XYXY format.\n mask_input (np.ndarray): A low resolution mask input to the model, typically\n coming from a previous prediction iteration. Has form Bx1xHxW, where\n for SAM, H=W=256. Masks returned by a previous iteration of the\n predict method do not need further transformation.\n multimask_output (bool): If true, the model will return three masks.\n For ambiguous input prompts (such as a single click), this will often\n produce better masks than a single prediction. If only a single\n mask is needed, the model's predicted quality score can be used\n to select the best mask. For non-ambiguous prompts, such as multiple\n input prompts, multimask_output=False can give better results.\n return_logits (bool): If true, returns un-thresholded masks logits\n instead of a binary mask.\n\n Returns:\n (torch.Tensor): The output masks in BxCxHxW format, where C is the\n number of masks, and (H, W) is the original image size.\n (torch.Tensor): An array of shape BxC containing the model's\n predictions for the quality of each mask.\n (torch.Tensor): An array of shape BxCxHxW, where C is the number\n of masks and H=W=256. 
These low res logits can be passed to\n a subsequent iteration as mask input.\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\"An image must be set with .set_image(...) before mask prediction.\")\n\n if point_coords is not None:\n points = (point_coords, point_labels)\n else:\n points = None\n\n # Embed prompts\n sparse_embeddings, dense_embeddings = self.model.prompt_encoder(\n points=points,\n boxes=boxes,\n masks=mask_input,\n )\n\n # Predict masks\n low_res_masks, iou_predictions = self.model.mask_decoder(\n image_embeddings=self.features,\n image_pe=self.model.prompt_encoder.get_dense_pe(),\n sparse_prompt_embeddings=sparse_embeddings,\n dense_prompt_embeddings=dense_embeddings,\n multimask_output=multimask_output,\n )\n\n # Upscale the masks to the original image resolution\n masks = self.model.postprocess_masks(low_res_masks, self.input_size, self.original_size)\n\n if not return_logits:\n masks = masks > self.model.mask_threshold\n\n return masks, iou_predictions, low_res_masks\n\n def get_image_embedding(self) -> torch.Tensor:\n \"\"\"\n Returns the image embeddings for the currently set image, with\n shape 1xCxHxW, where C is the embedding dimension and (H,W) are\n the embedding spatial dimension of SAM (typically C=256, H=W=64).\n \"\"\"\n if not self.is_image_set:\n raise RuntimeError(\n \"An image must be set with .set_image(...) to generate an embedding.\"\n )\n assert self.features is not None, \"Features must exist if an image has been set.\"\n return self.features\n\n @property\n def device(self) -> torch.device:\n return self.model.device\n\n def reset_image(self) -> None:\n \"\"\"Resets the currently set image.\"\"\"\n self.is_image_set = False\n self.features = None\n self.orig_h = None\n self.orig_w = None\n self.input_h = None\n self.input_w = None" }, { "identifier": "MaskData", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "class MaskData:\n \"\"\"\n A structure for storing masks and their related data in batched format.\n Implements basic filtering and concatenation.\n \"\"\"\n\n def __init__(self, **kwargs) -> None:\n for v in kwargs.values():\n assert isinstance(\n v, (list, np.ndarray, torch.Tensor)\n ), \"MaskData only supports list, numpy arrays, and torch tensors.\"\n self._stats = dict(**kwargs)\n\n def __setitem__(self, key: str, item: Any) -> None:\n assert isinstance(\n item, (list, np.ndarray, torch.Tensor)\n ), \"MaskData only supports list, numpy arrays, and torch tensors.\"\n self._stats[key] = item\n\n def __delitem__(self, key: str) -> None:\n del self._stats[key]\n\n def __getitem__(self, key: str) -> Any:\n return self._stats[key]\n\n def items(self) -> ItemsView[str, Any]:\n return self._stats.items()\n\n def filter(self, keep: torch.Tensor) -> None:\n for k, v in self._stats.items():\n if v is None:\n self._stats[k] = None\n elif isinstance(v, torch.Tensor):\n self._stats[k] = v[torch.as_tensor(keep, device=v.device)]\n elif isinstance(v, np.ndarray):\n self._stats[k] = v[keep.detach().cpu().numpy()]\n elif isinstance(v, list) and keep.dtype == torch.bool:\n self._stats[k] = [a for i, a in enumerate(v) if keep[i]]\n elif isinstance(v, list):\n self._stats[k] = [v[i] for i in keep]\n else:\n raise TypeError(f\"MaskData key {k} has an unsupported type {type(v)}.\")\n\n def cat(self, new_stats: \"MaskData\") -> None:\n for k, v in new_stats.items():\n if k not in self._stats or self._stats[k] is None:\n self._stats[k] = deepcopy(v)\n elif isinstance(v, torch.Tensor):\n self._stats[k] = torch.cat([self._stats[k], v], dim=0)\n 
elif isinstance(v, np.ndarray):\n self._stats[k] = np.concatenate([self._stats[k], v], axis=0)\n elif isinstance(v, list):\n self._stats[k] = self._stats[k] + deepcopy(v)\n else:\n raise TypeError(f\"MaskData key {k} has an unsupported type {type(v)}.\")\n\n def to_numpy(self) -> None:\n for k, v in self._stats.items():\n if isinstance(v, torch.Tensor):\n self._stats[k] = v.detach().cpu().numpy()" }, { "identifier": "area_from_rle", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def area_from_rle(rle: Dict[str, Any]) -> int:\n return sum(rle[\"counts\"][1::2])" }, { "identifier": "batch_iterator", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:\n assert len(args) > 0 and all(\n len(a) == len(args[0]) for a in args\n ), \"Batched iteration must have inputs of all the same size.\"\n n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)\n for b in range(n_batches):\n yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args]" }, { "identifier": "batched_mask_to_box", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Calculates boxes in XYXY format around masks. Return [0,0,0,0] for\n an empty mask. For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4.\n \"\"\"\n # torch.max below raises an error on empty inputs, just skip in this case\n if torch.numel(masks) == 0:\n return torch.zeros(*masks.shape[:-2], 4, device=masks.device)\n\n # Normalize shape to CxHxW\n shape = masks.shape\n h, w = shape[-2:]\n if len(shape) > 2:\n masks = masks.flatten(0, -3)\n else:\n masks = masks.unsqueeze(0)\n\n # Get top and bottom edges\n in_height, _ = torch.max(masks, dim=-1)\n in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :]\n bottom_edges, _ = torch.max(in_height_coords, dim=-1)\n in_height_coords = in_height_coords + h * (~in_height)\n top_edges, _ = torch.min(in_height_coords, dim=-1)\n\n # Get left and right edges\n in_width, _ = torch.max(masks, dim=-2)\n in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :]\n right_edges, _ = torch.max(in_width_coords, dim=-1)\n in_width_coords = in_width_coords + w * (~in_width)\n left_edges, _ = torch.min(in_width_coords, dim=-1)\n\n # If the mask is empty the right edge will be to the left of the left edge.\n # Replace these boxes with [0, 0, 0, 0]\n empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)\n out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)\n out = out * (~empty_filter).unsqueeze(-1)\n\n # Return to original shape\n if len(shape) > 2:\n out = out.reshape(*shape[:-2], 4)\n else:\n out = out[0]\n\n return out" }, { "identifier": "box_xyxy_to_xywh", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor:\n box_xywh = deepcopy(box_xyxy)\n box_xywh[2] = box_xywh[2] - box_xywh[0]\n box_xywh[3] = box_xywh[3] - box_xywh[1]\n return box_xywh" }, { "identifier": "build_all_layer_point_grids", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def build_all_layer_point_grids(\n n_per_side: int, n_layers: int, scale_per_layer: int\n) -> List[np.ndarray]:\n \"\"\"Generates point grids for all crop layers.\"\"\"\n points_by_layer = []\n for i in range(n_layers + 1):\n n_points = int(n_per_side / (scale_per_layer**i))\n points_by_layer.append(build_point_grid(n_points))\n 
return points_by_layer" }, { "identifier": "calculate_stability_score", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def calculate_stability_score(\n masks: torch.Tensor, mask_threshold: float, threshold_offset: float\n) -> torch.Tensor:\n \"\"\"\n Computes the stability score for a batch of masks. The stability\n score is the IoU between the binary masks obtained by thresholding\n the predicted mask logits at high and low values.\n \"\"\"\n # One mask is always contained inside the other.\n # Save memory by preventing unnecesary cast to torch.int64\n intersections = (\n (masks > (mask_threshold + threshold_offset))\n .sum(-1, dtype=torch.int16)\n .sum(-1, dtype=torch.int32)\n )\n unions = (\n (masks > (mask_threshold - threshold_offset))\n .sum(-1, dtype=torch.int16)\n .sum(-1, dtype=torch.int32)\n )\n return intersections / unions" }, { "identifier": "coco_encode_rle", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]:\n from pycocotools import mask as mask_utils # type: ignore\n\n h, w = uncompressed_rle[\"size\"]\n rle = mask_utils.frPyObjects(uncompressed_rle, h, w)\n rle[\"counts\"] = rle[\"counts\"].decode(\"utf-8\") # Necessary to serialize with json\n return rle" }, { "identifier": "generate_crop_boxes", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def generate_crop_boxes(\n im_size: Tuple[int, ...], n_layers: int, overlap_ratio: float\n) -> Tuple[List[List[int]], List[int]]:\n \"\"\"\n Generates a list of crop boxes of different sizes. Each layer\n has (2**i)**2 boxes for the ith layer.\n \"\"\"\n crop_boxes, layer_idxs = [], []\n im_h, im_w = im_size\n short_side = min(im_h, im_w)\n\n # Original image\n crop_boxes.append([0, 0, im_w, im_h])\n layer_idxs.append(0)\n\n def crop_len(orig_len, n_crops, overlap):\n return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops))\n\n for i_layer in range(n_layers):\n n_crops_per_side = 2 ** (i_layer + 1)\n overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))\n\n crop_w = crop_len(im_w, n_crops_per_side, overlap)\n crop_h = crop_len(im_h, n_crops_per_side, overlap)\n\n crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)]\n crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)]\n\n # Crops in XYWH format\n for x0, y0 in product(crop_box_x0, crop_box_y0):\n box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)]\n crop_boxes.append(box)\n layer_idxs.append(i_layer + 1)\n\n return crop_boxes, layer_idxs" }, { "identifier": "is_box_near_crop_edge", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def is_box_near_crop_edge(\n boxes: torch.Tensor, crop_box: List[int], orig_box: List[int], atol: float = 20.0\n) -> torch.Tensor:\n \"\"\"Filter masks at the edge of a crop, but not at the edge of the original image.\"\"\"\n crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)\n orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)\n boxes = uncrop_boxes_xyxy(boxes, crop_box).float()\n near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0)\n near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0)\n near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge)\n return torch.any(near_crop_edge, dim=1)" }, { "identifier": "mask_to_rle_pytorch", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def mask_to_rle_pytorch(tensor: 
torch.Tensor) -> List[Dict[str, Any]]:\n \"\"\"\n Encodes masks to an uncompressed RLE, in the format expected by\n pycoco tools.\n \"\"\"\n # Put in fortran order and flatten h,w\n b, h, w = tensor.shape\n tensor = tensor.permute(0, 2, 1).flatten(1)\n\n # Compute change indices\n diff = tensor[:, 1:] ^ tensor[:, :-1]\n change_indices = diff.nonzero()\n\n # Encode run length\n out = []\n for i in range(b):\n cur_idxs = change_indices[change_indices[:, 0] == i, 1]\n cur_idxs = torch.cat(\n [\n torch.tensor([0], dtype=cur_idxs.dtype, device=cur_idxs.device),\n cur_idxs + 1,\n torch.tensor([h * w], dtype=cur_idxs.dtype, device=cur_idxs.device),\n ]\n )\n btw_idxs = cur_idxs[1:] - cur_idxs[:-1]\n counts = [] if tensor[i, 0] == 0 else [0]\n counts.extend(btw_idxs.detach().cpu().tolist())\n out.append({\"size\": [h, w], \"counts\": counts})\n return out" }, { "identifier": "remove_small_regions", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def remove_small_regions(\n mask: np.ndarray, area_thresh: float, mode: str\n) -> Tuple[np.ndarray, bool]:\n \"\"\"\n Removes small disconnected regions and holes in a mask. Returns the\n mask and an indicator of if the mask has been modified.\n \"\"\"\n import cv2 # type: ignore\n\n assert mode in [\"holes\", \"islands\"]\n correct_holes = mode == \"holes\"\n working_mask = (correct_holes ^ mask).astype(np.uint8)\n n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8)\n sizes = stats[:, -1][1:] # Row 0 is background label\n small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh]\n if len(small_regions) == 0:\n return mask, False\n fill_labels = [0] + small_regions\n if not correct_holes:\n fill_labels = [i for i in range(n_labels) if i not in fill_labels]\n # If every region is below threshold, keep largest\n if len(fill_labels) == 0:\n fill_labels = [int(np.argmax(sizes)) + 1]\n mask = np.isin(regions, fill_labels)\n return mask, True" }, { "identifier": "rle_to_mask", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:\n \"\"\"Compute a binary mask from an uncompressed RLE.\"\"\"\n h, w = rle[\"size\"]\n mask = np.empty(h * w, dtype=bool)\n idx = 0\n parity = False\n for count in rle[\"counts\"]:\n mask[idx : idx + count] = parity\n idx += count\n parity ^= True\n mask = mask.reshape(w, h)\n return mask.transpose() # Put in C order" }, { "identifier": "uncrop_boxes_xyxy", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\n x0, y0, _, _ = crop_box\n offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device)\n # Check if boxes has a channel dimension\n if len(boxes.shape) == 3:\n offset = offset.unsqueeze(1)\n return boxes + offset" }, { "identifier": "uncrop_masks", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def uncrop_masks(\n masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int\n) -> torch.Tensor:\n x0, y0, x1, y1 = crop_box\n if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h:\n return masks\n # Coordinate transform masks\n pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0)\n pad = (x0, pad_x - x0, y0, pad_y - y0)\n return torch.nn.functional.pad(masks, pad, value=0)" }, { "identifier": "uncrop_points", "path": "SAMed/segment_anything/utils/amg.py", "snippet": "def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\n x0, y0, _, _ = crop_box\n offset = torch.tensor([[x0, y0]], 
device=points.device)\n # Check if points has a channel dimension\n if len(points.shape) == 3:\n offset = offset.unsqueeze(1)\n return points + offset" } ]
import numpy as np import torch import cv2 # type: ignore # noqa: F401 from torchvision.ops.boxes import batched_nms, box_area # type: ignore from typing import Any, Dict, List, Optional, Tuple from .modeling import Sam from .predictor import SamPredictor from .utils.amg import ( MaskData, area_from_rle, batch_iterator, batched_mask_to_box, box_xyxy_to_xywh, build_all_layer_point_grids, calculate_stability_score, coco_encode_rle, generate_crop_boxes, is_box_near_crop_edge, mask_to_rle_pytorch, remove_small_regions, rle_to_mask, uncrop_boxes_xyxy, uncrop_masks, uncrop_points, ) from pycocotools import mask as mask_utils # type: ignore # noqa: F401
10,911
Arguments: image (np.ndarray): The image to generate masks for, in HWC uint8 format. Returns: list(dict(str, any)): A list over records for masks. Each record is a dict containing the following keys: segmentation (dict(str, any) or np.ndarray): The mask. If output_mode='binary_mask', is an array of shape HW. Otherwise, is a dictionary containing the RLE. bbox (list(float)): The box around the mask, in XYWH format. area (int): The area in pixels of the mask. predicted_iou (float): The model's own prediction of the mask's quality. This is filtered by the pred_iou_thresh parameter. point_coords (list(list(float))): The point coordinates input to the model to generate this mask. stability_score (float): A measure of the mask's quality. This is filtered on using the stability_score_thresh parameter. crop_box (list(float)): The crop of the image used to generate the mask, given in XYWH format. """ # Generate masks mask_data = self._generate_masks(image) # Filter small disconnected regions and holes in masks if self.min_mask_region_area > 0: mask_data = self.postprocess_small_regions( mask_data, self.min_mask_region_area, max(self.box_nms_thresh, self.crop_nms_thresh), ) # Encode masks if self.output_mode == "coco_rle": mask_data["segmentations"] = [coco_encode_rle(rle) for rle in mask_data["rles"]] elif self.output_mode == "binary_mask": mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]] else: mask_data["segmentations"] = mask_data["rles"] # Write mask records curr_anns = [] for idx in range(len(mask_data["segmentations"])): ann = { "segmentation": mask_data["segmentations"][idx], "area": area_from_rle(mask_data["rles"][idx]), "bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(), "predicted_iou": mask_data["iou_preds"][idx].item(), "point_coords": [mask_data["points"][idx].tolist()], "stability_score": mask_data["stability_score"][idx].item(), "crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(), } curr_anns.append(ann) return curr_anns def _generate_masks(self, image: np.ndarray) -> MaskData: orig_size = image.shape[:2] crop_boxes, layer_idxs = generate_crop_boxes( orig_size, self.crop_n_layers, self.crop_overlap_ratio ) # Iterate over image crops data = MaskData() for crop_box, layer_idx in zip(crop_boxes, layer_idxs): crop_data = self._process_crop(image, crop_box, layer_idx, orig_size) data.cat(crop_data) # Remove duplicate masks between crops if len(crop_boxes) > 1: # Prefer masks from smaller crops scores = 1 / box_area(data["crop_boxes"]) scores = scores.to(data["boxes"].device) keep_by_nms = batched_nms( data["boxes"].float(), scores, torch.zeros(len(data["boxes"])), # categories iou_threshold=self.crop_nms_thresh, ) data.filter(keep_by_nms) data.to_numpy() return data def _process_crop( self, image: np.ndarray, crop_box: List[int], crop_layer_idx: int, orig_size: Tuple[int, ...], ) -> MaskData: # Crop the image and calculate embeddings x0, y0, x1, y1 = crop_box cropped_im = image[y0:y1, x0:x1, :] cropped_im_size = cropped_im.shape[:2] self.predictor.set_image(cropped_im) # Get points for this crop points_scale = np.array(cropped_im_size)[None, ::-1] points_for_image = self.point_grids[crop_layer_idx] * points_scale # Generate masks for this crop in batches data = MaskData() for (points,) in batch_iterator(self.points_per_batch, points_for_image): batch_data = self._process_batch(points, cropped_im_size, crop_box, orig_size) data.cat(batch_data) del batch_data self.predictor.reset_image() # Remove duplicates within this crop. 
keep_by_nms = batched_nms( data["boxes"].float(), data["iou_preds"], torch.zeros(len(data["boxes"])), # categories iou_threshold=self.box_nms_thresh, ) data.filter(keep_by_nms) # Return to the original image frame data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box)
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. class SamAutomaticMaskGenerator: def __init__( self, model: Sam, points_per_side: Optional[int] = 32, points_per_batch: int = 64, pred_iou_thresh: float = 0.88, stability_score_thresh: float = 0.95, stability_score_offset: float = 1.0, box_nms_thresh: float = 0.7, crop_n_layers: int = 0, crop_nms_thresh: float = 0.7, crop_overlap_ratio: float = 512 / 1500, crop_n_points_downscale_factor: int = 1, point_grids: Optional[List[np.ndarray]] = None, min_mask_region_area: int = 0, output_mode: str = "binary_mask", ) -> None: """ Using a SAM model, generates masks for the entire image. Generates a grid of point prompts over the image, then filters low quality and duplicate masks. The default settings are chosen for SAM with a ViT-H backbone. Arguments: model (Sam): The SAM model to use for mask prediction. points_per_side (int or None): The number of points to be sampled along one side of the image. The total number of points is points_per_side**2. If None, 'point_grids' must provide explicit point sampling. points_per_batch (int): Sets the number of points run simultaneously by the model. Higher numbers may be faster but use more GPU memory. pred_iou_thresh (float): A filtering threshold in [0,1], using the model's predicted mask quality. stability_score_thresh (float): A filtering threshold in [0,1], using the stability of the mask under changes to the cutoff used to binarize the model's mask predictions. stability_score_offset (float): The amount to shift the cutoff when calculated the stability score. box_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks. crops_n_layers (int): If >0, mask prediction will be run again on crops of the image. Sets the number of layers to run, where each layer has 2**i_layer number of image crops. crops_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks between different crops. crop_overlap_ratio (float): Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of the image length. Later layers with more crops scale down this overlap. crop_n_points_downscale_factor (int): The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n. point_grids (list(np.ndarray) or None): A list over explicit grids of points used for sampling, normalized to [0,1]. The nth grid in the list is used in the nth crop layer. Exclusive with points_per_side. min_mask_region_area (int): If >0, postprocessing will be applied to remove disconnected regions and holes in masks with area smaller than min_mask_region_area. Requires opencv. output_mode (str): The form masks are returned in. Can be 'binary_mask', 'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools. For large resolutions, 'binary_mask' may consume large amounts of memory. """ assert (points_per_side is None) != ( point_grids is None ), "Exactly one of points_per_side or point_grid must be provided." 
if points_per_side is not None: self.point_grids = build_all_layer_point_grids( points_per_side, crop_n_layers, crop_n_points_downscale_factor, ) elif point_grids is not None: self.point_grids = point_grids else: raise ValueError("Can't have both points_per_side and point_grid be None.") assert output_mode in [ "binary_mask", "uncompressed_rle", "coco_rle", ], f"Unknown output_mode {output_mode}." if output_mode == "coco_rle": if min_mask_region_area > 0: self.predictor = SamPredictor(model) self.points_per_batch = points_per_batch self.pred_iou_thresh = pred_iou_thresh self.stability_score_thresh = stability_score_thresh self.stability_score_offset = stability_score_offset self.box_nms_thresh = box_nms_thresh self.crop_n_layers = crop_n_layers self.crop_nms_thresh = crop_nms_thresh self.crop_overlap_ratio = crop_overlap_ratio self.crop_n_points_downscale_factor = crop_n_points_downscale_factor self.min_mask_region_area = min_mask_region_area self.output_mode = output_mode @torch.no_grad() def generate(self, image: np.ndarray) -> List[Dict[str, Any]]: """ Generates masks for the given image. Arguments: image (np.ndarray): The image to generate masks for, in HWC uint8 format. Returns: list(dict(str, any)): A list over records for masks. Each record is a dict containing the following keys: segmentation (dict(str, any) or np.ndarray): The mask. If output_mode='binary_mask', is an array of shape HW. Otherwise, is a dictionary containing the RLE. bbox (list(float)): The box around the mask, in XYWH format. area (int): The area in pixels of the mask. predicted_iou (float): The model's own prediction of the mask's quality. This is filtered by the pred_iou_thresh parameter. point_coords (list(list(float))): The point coordinates input to the model to generate this mask. stability_score (float): A measure of the mask's quality. This is filtered on using the stability_score_thresh parameter. crop_box (list(float)): The crop of the image used to generate the mask, given in XYWH format. 
""" # Generate masks mask_data = self._generate_masks(image) # Filter small disconnected regions and holes in masks if self.min_mask_region_area > 0: mask_data = self.postprocess_small_regions( mask_data, self.min_mask_region_area, max(self.box_nms_thresh, self.crop_nms_thresh), ) # Encode masks if self.output_mode == "coco_rle": mask_data["segmentations"] = [coco_encode_rle(rle) for rle in mask_data["rles"]] elif self.output_mode == "binary_mask": mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]] else: mask_data["segmentations"] = mask_data["rles"] # Write mask records curr_anns = [] for idx in range(len(mask_data["segmentations"])): ann = { "segmentation": mask_data["segmentations"][idx], "area": area_from_rle(mask_data["rles"][idx]), "bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(), "predicted_iou": mask_data["iou_preds"][idx].item(), "point_coords": [mask_data["points"][idx].tolist()], "stability_score": mask_data["stability_score"][idx].item(), "crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(), } curr_anns.append(ann) return curr_anns def _generate_masks(self, image: np.ndarray) -> MaskData: orig_size = image.shape[:2] crop_boxes, layer_idxs = generate_crop_boxes( orig_size, self.crop_n_layers, self.crop_overlap_ratio ) # Iterate over image crops data = MaskData() for crop_box, layer_idx in zip(crop_boxes, layer_idxs): crop_data = self._process_crop(image, crop_box, layer_idx, orig_size) data.cat(crop_data) # Remove duplicate masks between crops if len(crop_boxes) > 1: # Prefer masks from smaller crops scores = 1 / box_area(data["crop_boxes"]) scores = scores.to(data["boxes"].device) keep_by_nms = batched_nms( data["boxes"].float(), scores, torch.zeros(len(data["boxes"])), # categories iou_threshold=self.crop_nms_thresh, ) data.filter(keep_by_nms) data.to_numpy() return data def _process_crop( self, image: np.ndarray, crop_box: List[int], crop_layer_idx: int, orig_size: Tuple[int, ...], ) -> MaskData: # Crop the image and calculate embeddings x0, y0, x1, y1 = crop_box cropped_im = image[y0:y1, x0:x1, :] cropped_im_size = cropped_im.shape[:2] self.predictor.set_image(cropped_im) # Get points for this crop points_scale = np.array(cropped_im_size)[None, ::-1] points_for_image = self.point_grids[crop_layer_idx] * points_scale # Generate masks for this crop in batches data = MaskData() for (points,) in batch_iterator(self.points_per_batch, points_for_image): batch_data = self._process_batch(points, cropped_im_size, crop_box, orig_size) data.cat(batch_data) del batch_data self.predictor.reset_image() # Remove duplicates within this crop. keep_by_nms = batched_nms( data["boxes"].float(), data["iou_preds"], torch.zeros(len(data["boxes"])), # categories iou_threshold=self.box_nms_thresh, ) data.filter(keep_by_nms) # Return to the original image frame data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box)
data["points"] = uncrop_points(data["points"], crop_box)
17
2023-11-03 17:05:40+00:00
16k
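To make the row above easier to read, here is a minimal usage sketch for the SamAutomaticMaskGenerator API described in its docstrings. It is editor-added illustration, not part of the dataset row: it assumes the standard segment_anything package (sam_model_registry, SamAutomaticMaskGenerator) is installed, and the checkpoint and image paths are placeholders.

import cv2
from segment_anything import sam_model_registry, SamAutomaticMaskGenerator

# Load a SAM backbone; the checkpoint path is a placeholder for a locally downloaded ViT-H file.
sam = sam_model_registry["vit_h"](checkpoint="sam_vit_h_4b8939.pth")
mask_generator = SamAutomaticMaskGenerator(sam, points_per_side=32, output_mode="binary_mask")

# The generate() docstring requires an HWC uint8 image; OpenCV loads BGR, so convert to RGB.
image = cv2.cvtColor(cv2.imread("example.jpg"), cv2.COLOR_BGR2RGB)
masks = mask_generator.generate(image)

# Each record carries the keys documented above: segmentation, bbox (XYWH), area,
# predicted_iou, point_coords, stability_score, crop_box.
for m in masks[:3]:
    print(m["bbox"], m["area"], round(m["predicted_iou"], 3))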
microsoft/PLEX
PLEX/finetuning.py
[ { "identifier": "TrajectoryDataset", "path": "PLEX/util/data.py", "snippet": "class TrajectoryDataset:\n def __init__(self, trajectories, camera_names, contextual):\n self.trajectories = list(trajectories)\n if not globals.full_state_mode:\n self.camera_names = camera_names\n self.traj_lens = np.array([traj['len'] for traj in self.trajectories])\n else:\n self.camera_names = None\n self.traj_lens = np.array([len(traj['full_state']) for traj in self.trajectories])\n self.contextual = contextual\n\n if len(self.trajectories) == 0:\n return\n\n self.p_sample = self.traj_lens / np.sum(self.traj_lens)\n\n proto_traj = self.trajectories[0]\n proto_traj = proto_traj['load_images'](proto_traj)\n if not globals.full_state_mode:\n self.image_dims = None\n\n for cam in self.camera_names:\n image_dims = proto_traj['image'][cam].shape[1:]\n if self.image_dims is None:\n self.image_dims = image_dims\n else:\n assert np.all(self.image_dims == image_dims) or np.all(image_dims == ()), f'Images from a given cam should all be None or have the same size as from other cams. Other cams\\' image size: {self.image_dims}, this cam\\'s image size is {image_dims}.'\n else:\n self.full_state_dim = proto_traj['full_state'].shape[1]\n\n # Check for existence of optional keys\n self.has_proprios = 'proprio' in proto_traj\n self.has_actions = 'action' in proto_traj\n self.has_rewards = 'reward' in proto_traj\n\n if self.has_proprios:\n assert not globals.full_state_mode, 'We shouldn\\'t be using proprios in full-state mode.'\n self.proprio_dim = proto_traj['proprio'].shape[1]\n for traj in trajectories:\n assert traj['proprio'].shape[1] == self.proprio_dim\n\n if self.has_actions:\n self.action_dim = proto_traj['action'].shape[1]\n for traj in trajectories:\n assert traj['action'].shape[1] == self.action_dim\n\n def __len__(self):\n return len(self.trajectories)\n\n @property\n def video_only(self):\n return not self.has_actions and not self.has_proprios and not self.has_rewards and not globals.full_state_mode\n\n def copy_frames(self, src, actual_trg_len, rate_ratio, raise_frame_rate, pad_frame_gaps):\n # Copies data from a source array, adjusting frame rates as necessary.\n\n # Allocate the destination array to be same shape as the source array,\n # except for the first dimension (time), which must be actual_trg_len.\n trg_data = np.zeros((actual_trg_len, *src.shape[1:]), dtype=src.dtype)\n actual_src_len = len(src)\n\n if rate_ratio == 1:\n # The frame rates match. Do a direct copy.\n trg_data[:] = src[:actual_src_len]\n elif raise_frame_rate:\n # The source frame rate is too low. Copy source items as needed.\n for i in range(rate_ratio):\n new_src_len = len(trg_data[i::rate_ratio])\n trg_data[i::rate_ratio] = src[:new_src_len]\n if pad_frame_gaps:\n break # Leave zeros in the intervening frames.\n else:\n # The source frame rate is too high. 
Skip the unneeded items.\n trg_data[:] = src[0:rate_ratio * actual_src_len:rate_ratio]\n return trg_data\n\n def sample_batch(self, batch_size, target_frame_rate, pad_frame_gaps, max_len, get_context, discount, device=globals.DEFAULT_DEVICE, context_from_same_traj=False):\n assert len(self.trajectories) > 0\n # We should probably factor out the code that maps trajectories to tasks so that this computation is done only once, not every time a batch is sampled.\n task_name2traj_idx_dict = {}\n\n for i in range(len(self.trajectories)):\n if self.trajectories[i]['task_info'].name in task_name2traj_idx_dict.keys():\n task_name2traj_idx_dict[self.trajectories[i]['task_info'].name].append(i)\n else:\n task_name2traj_idx_dict[self.trajectories[i]['task_info'].name] = [i]\n\n batch_inds = np.random.choice(\n np.arange(len(self.trajectories)),\n size=batch_size,\n replace=True,\n p=self.p_sample # reweights so we sample according to timesteps\n )\n\n if not globals.full_state_mode:\n images = {cam: [] for cam in self.camera_names}\n contexts = {cam: [] for cam in self.camera_names} if self.contextual else None\n proprios = [] if self.has_proprios else None\n else:\n full_states = []\n contexts = []\n proprios = None\n\n masks = []\n actions = [] if self.has_actions else None\n rewards, returns = ([], []) if self.has_rewards else (None, None)\n timesteps = []\n\n for batch_index in range(batch_size):\n traj = None\n traj_len = -1\n is_valid = False\n\n while not is_valid:\n traj = self.trajectories[batch_inds[batch_index]]\n traj_len = traj['len'] if not globals.full_state_mode else len(traj['full_state'])\n\n if self.contextual:\n MAX_RETRIES = 3\n retry_ctr = 0\n while not is_valid and retry_ctr < MAX_RETRIES:\n retry_ctr += 1\n if self.video_only:\n # Choose a context from the same trajectory\n ctx, is_valid = get_context(traj, 0, traj_len)\n else:\n # Choose a context from another random trajectory **of the same task**.\n if context_from_same_traj:\n ctx_traj = traj\n else:\n ctx_traj_idx = task_name2traj_idx_dict[traj['task_info'].name][randrange(len(task_name2traj_idx_dict[traj['task_info'].name]))]\n ctx_traj = self.trajectories[ctx_traj_idx]\n ctx_traj_len = ctx_traj['len'] if not globals.full_state_mode else len(ctx_traj['full_state'])\n ctx, is_valid = get_context(ctx_traj, 0, ctx_traj_len)\n\n if is_valid and retry_ctr > 1:\n print(f'Found a valid context only on the {retry_ctr}th attempt...')\n\n if not is_valid:\n # Sample a different trajectory\n batch_inds[batch_index] = np.random.choice(\n np.arange(len(self.trajectories)),\n size=1,\n replace=True,\n p=self.p_sample # reweights so we sample according to timesteps\n )[0]\n continue\n\n if not globals.full_state_mode:\n for cam in self.camera_names:\n contexts[cam].append(ctx[cam][np.newaxis])\n else:\n contexts.append(ctx[np.newaxis])\n else:\n # Non-contexttual trajectories don't need a context, by definition, so we'll just oveeride the context validity check.\n is_valid = True\n\n src_end = random.randint(1, traj_len)\n data_frame_rate = traj['task_info'].frame_rate # Source fps.\n max_trg_len = max_len # trg refers to target arrays that will be returned.\n\n assert (data_frame_rate is None) or (target_frame_rate is None) or (\n data_frame_rate == target_frame_rate) or self.video_only, \\\n \"For now, the target and data frame rates can be different only for video-only data.\"\n\n if (data_frame_rate is None) or (target_frame_rate is None) or (data_frame_rate == target_frame_rate):\n # The frame rates match. 
Do a direct copy.\n rate_ratio = 1\n raise_frame_rate = False\n max_src_len = max_trg_len\n src_start = max(0, src_end - max_src_len)\n actual_src_len = src_end - src_start\n trg_start = src_start\n actual_trg_len = actual_src_len\n elif data_frame_rate < target_frame_rate:\n # The source frame rate is too low. Copy each source item (or pad with zeros) as many times as needed.\n rate_ratio = target_frame_rate // data_frame_rate\n raise_frame_rate = True\n max_src_len = math.ceil(max_trg_len / rate_ratio) # Fewer source frames will be needed.\n src_start = max(0, src_end - max_src_len)\n actual_src_len = src_end - src_start\n trg_start = src_start * rate_ratio\n actual_trg_len = min(max_trg_len, actual_src_len * rate_ratio)\n else: # data_frame_rate > target_frame_rate\n # The source frame rate is too high. Skip the unneeded items.\n rate_ratio = data_frame_rate // target_frame_rate\n raise_frame_rate = False\n max_src_len = max_trg_len * rate_ratio # Some source frames will be dropped.\n src_start = max(0, src_end - max_src_len)\n actual_src_len = src_end - src_start\n trg_start = src_start // rate_ratio\n actual_trg_len = min(max_trg_len, (actual_src_len + rate_ratio - 1) // rate_ratio)\n\n trg_end = trg_start + actual_trg_len\n\n if not globals.full_state_mode:\n for cam in self.camera_names:\n traj = traj['load_images'](traj, start_idx=src_start, end_idx=src_end)\n subseq = traj['image'][cam][src_start:src_end]\n trg_data = self.copy_frames(subseq, actual_trg_len, rate_ratio, raise_frame_rate, pad_frame_gaps)\n images[cam].append(cat1(\n np.zeros((1, max_trg_len - actual_trg_len, *self.image_dims)),\n trg_data.reshape(1, actual_trg_len, *self.image_dims)\n ))\n if self.has_proprios:\n proprios.append(cat1(\n np.zeros((1, max_trg_len - actual_trg_len, self.proprio_dim)),\n traj['proprio'][src_start:src_end].reshape(1, actual_trg_len, self.proprio_dim)\n ))\n else:\n full_states.append(cat1(\n np.zeros((1, max_trg_len - actual_trg_len, self.full_state_dim)),\n traj['full_state'][src_start:src_end].reshape(1, actual_trg_len, self.full_state_dim)\n ))\n\n if self.has_actions:\n # Why the * -10?\n actions.append(cat1(\n np.ones((1, max_trg_len - actual_trg_len, self.action_dim)) * -10.,\n traj['action'][src_start:src_end].reshape(1, actual_trg_len, self.action_dim)\n ))\n\n if self.has_rewards:\n rewards.append(cat1(\n np.zeros((1, max_trg_len - actual_trg_len, 1)),\n traj['reward'][src_start:src_end].reshape(1, actual_trg_len, 1)\n ))\n if 'rtg' in traj:\n returns.append(cat1(\n np.zeros((1, max_trg_len - actual_trg_len, 1)),\n traj['rtg'][src_start:src_end].reshape(1, actual_trg_len, 1)\n ))\n else:\n rtgs = discount_cumsum(traj['reward'][src_start:], traj['success'][-1], gamma=discount)\n returns.append(cat1(\n np.zeros((1, max_trg_len - actual_trg_len, 1)),\n rtgs[:actual_trg_len].reshape(1, actual_trg_len, 1)\n ))\n\n timesteps.append(cat1(\n np.zeros((1, max_trg_len - actual_trg_len), dtype=np.long),\n np.arange(trg_start, trg_end, dtype=np.long)[np.newaxis],\n dtype=np.long\n ))\n\n masks.append(cat1(\n np.zeros((1, max_trg_len - actual_trg_len)),\n np.ones((1, actual_trg_len))\n ))\n\n return [\n torchify(x, device)\n for x in (contexts, (images if not globals.full_state_mode else full_states), proprios, actions, rewards, returns, timesteps, masks)\n ]" }, { "identifier": "load_data", "path": "PLEX/util/data.py", "snippet": "def load_data(log, data_dir, tasks, max_trajectories, **kwargs):\n all_trajectories = {}\n for task, max_traj in zip(tasks, max_trajectories):\n if 
task.dataset_type == 'robomimic' or task.dataset_type == 'robosuite' or task.dataset_type == 'libero':\n trajectories = load_robomimic_data(log, data_dir, task, max_trajectories=max_traj, **kwargs)\n elif task.dataset_type == 'metaworld':\n trajectories = load_metaworld_data(log, data_dir, task, max_trajectories=max_traj, **kwargs)\n elif task.dataset_type == 'bridge' or task.dataset_type == 'bridge-v2':\n trajectories = load_bridge_data(log, data_dir, task, max_trajectories=max_traj, **kwargs)\n elif task.dataset_type == 'd4rl':\n trajectories = load_d4rl_data(log, data_dir, task, max_trajectories=max_traj, **kwargs)\n else:\n assert False, 'Unknown dataset type {} for task {}'.format(task.dataset_type, task)\n\n for task_name in trajectories:\n if task_name in all_trajectories:\n # This may happen if we are loading several sub-datasets for the same task, e.g., \"ph\" and \"mh\" subdatasets in robomimic\n # NOTE: Max trajectories limit should probably apply to *all* trajectories of this task but currently applies on a per-directory basis.\n all_trajectories[task_name].extend(trajectories[task_name])\n else:\n all_trajectories[task_name] = trajectories[task_name]\n return all_trajectories" }, { "identifier": "setup_batch_sampler", "path": "PLEX/util/data.py", "snippet": "def setup_batch_sampler(dataset, context_style, cmdline_args, device):\n context_fn = setup_context_sampler(context_style) if dataset.contextual else lambda *args, **kwargs: None\n return lambda batch_size, target_frame_rate, pad_frame_gaps: dataset.sample_batch(batch_size,\n target_frame_rate,\n pad_frame_gaps,\n max_len=((cmdline_args['obs_pred.K'] + cmdline_args['future_step']) if cmdline_args['model'] == 'PLEX' else cmdline_args['K']),\n get_context=context_fn,\n discount=cmdline_args['discount'],\n device=device,\n context_from_same_traj=cmdline_args['context_from_same_traj'])" }, { "identifier": "train_val_split", "path": "PLEX/util/data.py", "snippet": "def train_val_split(items, val_frac):\n items = list(items)\n n_total = len(items)\n train_val_split_rng.shuffle(items)\n n_val = round(val_frac * n_total)\n return items[n_val:], items[:n_val]" }, { "identifier": "parse_tasks", "path": "PLEX/util/misc.py", "snippet": "def parse_tasks(task_spec_str, robot=None, global_max_traj=None):\n if task_spec_str is None or task_spec_str == 'None':\n return [], []\n\n task_specs = parse_comma_sep_param_value(task_spec_str)\n descriptors = []\n max_trajs = []\n for task_spec in task_specs:\n if task_spec.startswith('(') and task_spec.endswith(')'):\n task_spec, max_traj = [part.strip('(): ') for part in task_spec.split(':')]\n max_trajs.append(int(max_traj))\n else:\n max_trajs.append(global_max_traj)\n\n if robot is None:\n task = task_spec\n else:\n # --TARGET_ROBOT-- is a reserved token that can't be used to name an actual robot.\n task = task_spec.replace('--TARGET_ROBOT--', robot)\n assert task != task_spec, 'Invalid task directory string: {}. 
Needs to contain the \\\"--TARGET_ROBOT--\\\" token'.format(task)\n\n descriptors.append(TaskDescriptor(task))\n return descriptors, max_trajs" }, { "identifier": "setup_essentials", "path": "PLEX/util/misc.py", "snippet": "def setup_essentials(cmdline_args):\n set_seed(cmdline_args['seed'])\n data_shuffling_rng = np.random.RandomState(cmdline_args['seed'])\n log = setup_logging(cmdline_args)\n device = cmdline_args.get('device', 'cuda')\n log_to_wandb = cmdline_args.get('log_to_wandb', False)\n timer = Timer(log)\n\n camera_names = parse_comma_sep_param_value(cmdline_args['camera_names'])\n\n # Very important! This sets up observation preprocessing (such as resizing images to a desired size and swapping their format from HWC to CWH)\n # that will be done by the robomimic library to specified observation types when these observations are loaded from robomimic's h5py files or\n # generated by robosuite.\n if 'FULL_STATE' in camera_names:\n assert len(camera_names) == 1, \"If FULL_STATE is present among camera names, it must be the only camera name.\"\n globals.full_state_mode = True\n else:\n globals.full_state_mode = False\n\n if not globals.full_state_mode:\n init_obs_preprocessing(camera_names, cmdline_args['image_size'])\n\n modalities_to_mask = parse_comma_sep_param_value(cmdline_args['modalities_to_mask'])\n data_dir = construct_data_dir_path(cmdline_args)\n common_env_metadata_dict = {'robosuite': None, 'metaworld': None, 'bridge': None}\n\n for modality in modalities_to_mask:\n assert modality in globals.MODALITIES\n\n return log, log_to_wandb, timer, data_shuffling_rng, device, camera_names, modalities_to_mask, data_dir, common_env_metadata_dict" }, { "identifier": "setup_model", "path": "PLEX/util/misc.py", "snippet": "def setup_model(cmdline_args, example_task, log, device, camera_names, modalities_to_mask, data_dir, bc_mode):\n obs_dims, proprio_dim, action_dim = get_robot_dims(example_task, camera_names, cmdline_args['image_size'])\n pretrained_state_dict = {}\n\n # Load pretrained weights, if applicable\n load_path = cmdline_args['load_path']\n if load_path is not None:\n load_path = load_path.replace('--TARGET_ROBOT--', cmdline_args['robot'])\n log(f'Loading pretrained weights from {load_path}')\n pretrained_state_dict = torch.load(load_path)\n\n std_bounds = (cmdline_args['std_min'], cmdline_args['std_max'])\n\n tune_style_kwargs = {}\n tune_style_kwargs['image_encoder_tune_style'] = cmdline_args['image_encoder_tune_style']\n\n if cmdline_args['model'] == 'PLEX':\n assert cmdline_args['obs_pred.K'] is not None\n assert cmdline_args['inv_d_pred.K'] is not None\n assert cmdline_args['obs_pred.K'] >= cmdline_args['inv_d_pred.K']\n assert cmdline_args['obs_pred.K'] % cmdline_args['inv_d_pred.K'] == 0\n obs_pred_gpt2_kwargs = dict(\n n_layer=cmdline_args['obs_pred.n_layer'],\n n_head=cmdline_args['obs_pred.n_head'],\n K=cmdline_args['obs_pred.K'],\n activation_function=cmdline_args['activation_function'],\n resid_pdrop=cmdline_args['dropout'],\n attn_pdrop=cmdline_args['dropout']\n )\n inv_d_pred_gpt2_kwargs = dict(\n n_layer=cmdline_args['inv_d_pred.n_layer'],\n n_head=cmdline_args['inv_d_pred.n_head'],\n K=cmdline_args['inv_d_pred.K'],\n activation_function=cmdline_args['activation_function'],\n resid_pdrop=cmdline_args['dropout'],\n attn_pdrop=cmdline_args['dropout']\n )\n\n model = PLEX(\n camera_names=camera_names,\n obs_dims=obs_dims,\n proprio_dim=proprio_dim,\n act_dim=action_dim,\n hidden_dim=cmdline_args['embed_dim'],\n # The history length for this model is always the 
observation prediction model's history length:\n history_len=cmdline_args['obs_pred.K'],\n image_encoder_arch=cmdline_args['image_encoder_arch'],\n image_encoder_load=cmdline_args['image_encoder_load'],\n use_random_crops=True,\n pool_type=cmdline_args['pool_type'],\n action_output_type=cmdline_args['action_output_type'],\n impute_style=cmdline_args['impute_style'],\n data_dir=data_dir,\n relative_position_encodings=cmdline_args['relative_position_encodings'],\n future_step=cmdline_args['future_step'],\n std_bounds=std_bounds,\n obs_pred_gpt2_kwargs=obs_pred_gpt2_kwargs,\n inv_d_pred_gpt2_kwargs=inv_d_pred_gpt2_kwargs,\n modalities_to_mask=modalities_to_mask,\n bc_mode=bc_mode\n ).to(device=device)\n\n # Record the tune style parameters\n tune_style_kwargs['obs_pred_transformer_tune_style'] = cmdline_args['obs_pred.transformer_tune_style']\n tune_style_kwargs['inv_d_pred_transformer_tune_style'] = cmdline_args['inv_d_pred.transformer_tune_style']\n\n elif cmdline_args['model'] == 'DT':\n # Configure the model\n gpt2_kwargs = dict(\n n_layer=cmdline_args['n_layer'],\n n_head=cmdline_args['n_head'],\n activation_function=cmdline_args['activation_function'],\n resid_pdrop=cmdline_args['dropout'],\n attn_pdrop=cmdline_args['dropout'],\n relative_position_encodings=cmdline_args['relative_position_encodings']\n )\n\n model = DecisionTransformer(\n camera_names=camera_names,\n obs_dims=obs_dims,\n proprio_dim=proprio_dim,\n act_dim=action_dim,\n hidden_dim=cmdline_args['embed_dim'],\n history_len=cmdline_args['K'],\n image_encoder_arch=cmdline_args['image_encoder_arch'],\n image_encoder_load=cmdline_args['image_encoder_load'],\n use_random_crops=True,\n pool_type=cmdline_args['pool_type'],\n action_output_type=cmdline_args['action_output_type'],\n impute_style=cmdline_args['impute_style'],\n data_dir=data_dir,\n gpt2_kwargs=gpt2_kwargs,\n std_bounds=std_bounds,\n modalities_to_mask=modalities_to_mask,\n bc_mode=bc_mode\n ).to(device=device)\n\n # Record the tune style parameters\n tune_style_kwargs['transformer_tune_style'] = cmdline_args['transformer_tune_style']\n\n elif cmdline_args['model'] == 'MLP':\n model = MLPBCModel(\n camera_names=camera_names,\n obs_dims=obs_dims,\n proprio_dim=proprio_dim,\n act_dim=action_dim,\n hidden_dim=cmdline_args['embed_dim'],\n history_len=cmdline_args['K'],\n image_encoder_arch=cmdline_args['image_encoder_arch'],\n image_encoder_load=cmdline_args['image_encoder_load'],\n use_random_crops=True,\n impute_style=cmdline_args['impute_style'],\n n_layer=cmdline_args['n_layer'],\n activation_function=cmdline_args['activation_function'],\n dropout=cmdline_args['dropout'],\n modalities_to_mask=modalities_to_mask,\n bc_mode=bc_mode,\n std_bounds=std_bounds,\n ).to(device=device)\n\n # Record the tune style parameters\n # TODO\n\n else:\n raise NotImplementedError(f'Unknown model type: {cmdline_args.model}')\n log('Model architecture:')\n log(str(model))\n\n if len(pretrained_state_dict) > 0:\n model.load_state_dict(pretrained_state_dict)\n log('Loaded successfully!')\n else:\n log('Training/finetuning the model from scratch!')\n\n return model, tune_style_kwargs" }, { "identifier": "set_trainable_params", "path": "PLEX/util/misc.py", "snippet": "def set_trainable_params(model, trainable_param_spec, log):\n model.set_requires_grad(**trainable_param_spec)\n trainable_params = [p for p in model.parameters() if p.requires_grad]\n num_trainable_params = sum([p.numel() for p in trainable_params])\n num_params = sum([p.numel() for p in model.parameters()])\n log(f'Training 
{num_trainable_params} out of {num_params} total parameters')\n return trainable_params" }, { "identifier": "setup_trainer", "path": "PLEX/util/misc.py", "snippet": "def setup_trainer(batch_sampler, lr, eval_fns, model, trainable_params, cmdline_args):\n optimizer = torch.optim.AdamW(\n trainable_params,\n lr=lr,\n weight_decay=cmdline_args['weight_decay'],\n )\n scheduler = torch.optim.lr_scheduler.LambdaLR(\n optimizer,\n lambda steps: min((steps+1)/cmdline_args['warmup_steps'], 1)\n )\n\n # Model-specific loss weights\n if cmdline_args['model'] == 'DT' or cmdline_args['model'] == 'MLP':\n loss_weights = {\n 'action': 1.0\n }\n elif cmdline_args['model'] == 'PLEX':\n loss_weights = {\n # This is the task-conditioned latent state prediction loss weight.\n # It should be 1.0 for PL pretraining and 0.0 for EX pretraining (since EX pretraining uses\n # task-agnostic data that makes task-conditioned latent state prediction impossible).\n # It should be 1.0 for target-task finetuning as well.\n 'future_prediction': cmdline_args['future_prediction_loss_weight']\n }\n # The EX part of PLEX (i.e., inversed dynamics -- action prediction based on the current and a future latent state)\n # can be trained using the future latent state of the training trajectory *or* the future latent state\n # predicted by the PL part of PLEX (the latent state predictor).\n # If we care about the former, we set grounded_inverse_dynamics_loss_weight = 1 and predicted_inverse_dynamics_loss_weight = 0.\n # If we care about the latter, then vice versa. In either case,\n # predicted_inverse_dynamics_loss_weight = 1 - grounded_inverse_dynamics_loss_weight.\n #\n # Namely, for EX pretraining we set grounded_inverse_dynamics_loss_weight = 1, because\n # the latent state predictor (PL) is unavailable at the time when EX is being pretrained.\n #\n # For PL pretraining, grounded_inverse_dynamics_loss_weight doesn't matter, because during PL pretraining\n # the inverse dynamics precictor (EX) is frozen and isn't affected by training, and the inverse dynamics\n # losses, in turn, don't affect the PL component of PLEX.\n #\n # For target-task finetuning of PLEX, we set predicted_inverse_dynamics_loss_weight = 1, because we want to adapt the\n # PL and EX components of PLEX to work together.\n for which in ['predicted', 'grounded']:\n key = f'{which}_inverse_dynamics'\n loss_weights[key] = cmdline_args[f'{key}_loss_weight']\n else:\n raise NotImplementedError\n\n return Trainer(\n model=model,\n optimizer=optimizer,\n get_batch=batch_sampler,\n batch_size=cmdline_args['batch_size'],\n target_frame_rate=cmdline_args['target_frame_rate'],\n pad_frame_gaps=cmdline_args['pad_frame_gaps'],\n scheduler=scheduler,\n loss_weights=loss_weights,\n eval_fns=eval_fns,\n )" }, { "identifier": "run_training", "path": "PLEX/util/misc.py", "snippet": "def run_training(trainer, model, num_steps, model_filename_prefix, cmdline_args, log, log_to_wandb, timer):\n log(f'Commencing training...')\n metrics = defaultdict(list)\n best = float('-inf')\n\n if cmdline_args['model'] == 'PLEX':\n model_info = f'plK{cmdline_args[\"obs_pred.K\"]}_plL{cmdline_args[\"obs_pred.n_layer\"]}_plH{cmdline_args[\"obs_pred.n_head\"]}_exK{cmdline_args[\"inv_d_pred.K\"]}_exL{cmdline_args[\"inv_d_pred.n_layer\"]}_exH{cmdline_args[\"inv_d_pred.n_head\"]}_res{cmdline_args[\"image_size\"]}_bc{cmdline_args[\"bc_learning_mode\"]}_la{cmdline_args[\"future_step\"]}_relpos{cmdline_args[\"relative_position_encodings\"]}__'\n elif cmdline_args['model'] == 'DT':\n model_info = 
f'K{cmdline_args[\"K\"]}_L{cmdline_args[\"n_layer\"]}_H{cmdline_args[\"n_head\"]}_res{cmdline_args[\"image_size\"]}_bc{cmdline_args[\"bc_learning_mode\"]}_relpos{cmdline_args[\"relative_position_encodings\"]}__'\n elif cmdline_args['model'] == 'MLP':\n model_info = f'K{cmdline_args[\"K\"]}_L{cmdline_args[\"n_layer\"]}_res{cmdline_args[\"image_size\"]}_bc{cmdline_args[\"bc_learning_mode\"]}__'\n else:\n raise NotImplementedError\n\n for iter in range(cmdline_args['max_iters']):\n with timer.time('iteration'):\n outputs = trainer.train_iteration(\n num_steps=num_steps,\n iter_num=iter+1,\n print_fn=log\n )\n\n for k, v in outputs.items():\n metrics[k].append(v)\n\n with open(log.dir/'metrics.pkl', 'wb') as f:\n pickle.dump(dict(metrics), f)\n\n if log_to_wandb:\n wandb.log(outputs)\n\n torch.save(model.state_dict(), log.dir/(model_filename_prefix + model_info + 'latest.pt'))\n torch.save(model.state_dict(), log.dir/(model_filename_prefix + model_info + f'iter_{iter+1}.pt'))\n metric_of_interest = outputs[cmdline_args['best_metric']]\n if metric_of_interest > best:\n best = metric_of_interest\n log(f'New best: {best}')\n torch.save(model.state_dict(), log.dir/(model_filename_prefix + model_info + 'best.pt'))\n\n print(f\"\\n\\nTHE BEST VALUE OF THE {cmdline_args['best_metric']} METRIC ACROSS ALL TRAINING ITERATIONS IS {best}.\")\n return dict(metrics)" }, { "identifier": "get_success_rate_evaluator", "path": "PLEX/util/evaluators.py", "snippet": "def get_success_rate_evaluator(task, traj_data, env_metadata, cmdline_args, log_dir):\n # Taking the average return of all trajectories as the target is dangerous: we may have many trajectories with low return.\n # target_return = sum(traj['reward'].sum() for traj in val_data.trajectories) / len(val_data.trajectories)\n get_context = setup_context_sampler(cmdline_args['context_style'])\n\n def eval_episodes(model, step):\n conditions = []\n\n if cmdline_args['record_video']:\n record_traj_dir = (log_dir/f'videos_from_epoch_{step}')\n record_traj_dir.mkdir(parents=True)\n else:\n record_traj_dir = None\n\n returns = []\n\n # ASSUMPTIONS:\n # -- In goal-directed tasks, each successful (goal-reaching) trajectory has a higher score than every non-goal-reaching one.\n # -- Every goal-reaching trajectory stays at a goal state once it reaches one.\n for traj in traj_data.trajectories:\n returns.append(discount_cumsum(traj['reward'], traj['success'][-1], cmdline_args['discount'])[0])\n\n returns.sort(reverse=True)\n # [top_return_LO, top_return_HI] is the range of returns corresponding to the top cmdline_args['top_return_fraction'] fraction of\n # the demonstration trajectories\n top_return_LO = returns[math.ceil(cmdline_args['top_return_fraction'] * len(returns)) - 1]\n top_return_HI = returns[0]\n\n if not cmdline_args['bc_learning_mode']:\n print(f\"Top return range: {top_return_LO} -- {top_return_HI}\")\n\n for e in range(cmdline_args['num_eval_episodes']):\n while True:\n # During evaluation with success_rate (and simultaneously success rate) as the metric,\n # validation_frac is just the fraction of training trajectories whose goal images will serve as contexts during evaluation.\n # Note that each episode will generally start with a scene where even objects other than the goal objects will\n # generally be positioned differently than in any goal image the agent has seen in training, so sampling evaluation-time\n # contexts from among the training trajectory goals is fine.\n val_traj = random.choice(traj_data.trajectories)\n context, is_valid = 
get_context(val_traj, 0, len(val_traj['reward']))\n if is_valid:\n break\n\n target_return = (top_return_LO + random.random() * (top_return_HI - top_return_LO))\n # If the learning mode *is* BC (as opposed to offline RL), then we will ignore\n # target return during conditioning, so its value won't matter.\n if not cmdline_args['bc_learning_mode']:\n print(f\"Target return for episode {e}: {target_return}\")\n\n conditions.append((context, target_return))\n\n if not cmdline_args['bc_learning_mode']:\n # Make sure that either:\n # (a) these settings are the same as at training time or\n # (b) the model was trained and is being evaluated in BC mode (i.e., rewards/returns weren't used\n # at training time and are ignored at evaluation time).\n print(f'Is the reward normalized **at evaluation time**: {cmdline_args[\"normalize_reward\"]}')\n print(f'Type of reward to be used for conditioning at evaluation time: {cmdline_args[\"reward_type\"]}')\n\n returns, succ_episodes, lengths = evaluate_parallel(\n conditions, task, model,\n device=cmdline_args.get('device', 'cuda'),\n use_normalized_reward=cmdline_args['normalize_reward'],\n reward_type=cmdline_args['reward_type'],\n env_meta=env_metadata,\n full_state_mode = globals.full_state_mode,\n min_time_at_goal_for_success=cmdline_args['min_time_at_goal_for_success'],\n camera_names=parse_comma_sep_param_value(cmdline_args['camera_names']),\n image_size=cmdline_args['image_size'],\n num_workers=cmdline_args['num_eval_workers'],\n max_ep_len=cmdline_args['max_eval_episode_len'],\n discount=cmdline_args['discount'],\n record_camera=(DEFAULT_CAM[task.dataset_type] if cmdline_args['record_camera'] is None else cmdline_args['record_camera']),\n record_traj_dir=record_traj_dir\n )\n\n num_succ = len([s for s in succ_episodes if s is True])\n success_rate = num_succ/len(succ_episodes)*100\n\n print(f'Iteration {step} SUCCESS RATE: {success_rate}%')\n print(f'Iteration {step} MEAN NATIVE RETURN: {np.mean(returns)}')\n print(f'Iteration {step} MEAN EPISODE LENGTH: {np.mean(lengths)}')\n\n return {\n 'success_rate': success_rate,\n 'return_mean': np.mean(returns),\n 'return_std': np.std(returns),\n 'length_mean': np.mean(lengths),\n 'length_std': np.std(lengths)\n }\n\n return eval_episodes" }, { "identifier": "get_validation_error_evaluator", "path": "PLEX/util/evaluators.py", "snippet": "def get_validation_error_evaluator(dataset, cmdline_args, device):\n get_val_batch = setup_batch_sampler(dataset, cmdline_args['context_style'], cmdline_args, device)\n\n def validation_error(model, iter):\n errors = []\n for _ in range(cmdline_args['validation_samples']):\n contexts, images, states, actions, rewards, rtg, timesteps, attention_mask = get_val_batch(cmdline_args['batch_size'],\n cmdline_args['target_frame_rate'],\n cmdline_args['pad_frame_gaps'])\n with torch.no_grad():\n action_preds = model.forward(\n contexts, images, states,\n actions if isinstance(model.module, PLEX) else actions[:,:-1],\n rewards,\n rtg,\n timesteps,\n mask=attention_mask,\n )[0]\n\n if isinstance(model.module, PLEX):\n act_dim = action_preds.shape[2]\n attention_mask_shortened = attention_mask[:,:-cmdline_args['future_step']]\n action_preds = action_preds.reshape(-1, act_dim)[attention_mask_shortened.reshape(-1) > 0]\n action_target = torch.clone(actions[:,:-cmdline_args['future_step']]).reshape(-1, act_dim)[attention_mask_shortened.reshape(-1) > 0]\n else:\n action_target = actions[:,-1]\n\n # We are negating the error here for consistency with other metrics, which are maximization 
metrics.\n error = -torch.mean((action_preds - action_target) ** 2).item()\n errors.append(error)\n return {\n f'neg_val_error': np.mean(errors)\n }\n return validation_error" }, { "identifier": "add_common_args", "path": "PLEX/util/cmdline.py", "snippet": "def add_common_args(parser):\n # Logging\n parser.add_argument('--log_dir', type=str, default='~/logs')\n parser.add_argument('--log_id', type=str, default=None)\n parser.add_argument('--log_to_wandb', '-w', action='store_true')\n\n # General setup\n parser.add_argument('--seed', type=int, default=0)\n parser.add_argument('--device', type=str, default='cuda')\n\n # Core model\n parser.add_argument('--model', type=str, default='DT')\n # This is the load path for the starting model. If None, the starting model is initialized randomly.\n parser.add_argument('--load_path', type=str, default=None)\n parser.add_argument('--modalities_to_mask', type=str, default='action')\n parser.add_argument('--impute_style', type=str, default='trainable')\n\n # Parameters for the Gaussian action head, if used\n parser.add_argument('--std_min', type=float, default=0.001)\n parser.add_argument('--std_max', type=float, default=1.0)\n\n ### Decision transformer parameters\n parser.add_argument('--K', type=int, default=10)\n parser.add_argument('--n_layer', type=int, default=None) # The default is None to easily detect when this pipeline is running the DT model unintentionally.\n parser.add_argument('--n_head', type=int, default=None)\n parser.add_argument('--activation_function', type=str, default='relu')\n parser.add_argument('--dropout', type=float, default=0.1)\n parser.add_argument('--embed_dim', type=int, default=128) # NOTE: embed_dim must be a multiple of n_head!\n parser.add_argument('--transformer_tune_style', type=str, default=None,\n choices=['all', 'last_block', 'linear_probe', 'none'])\n ### PLEX parameters\n parser.add_argument('--future_step', type=int, default=1)\n parser.add_argument('--obs_pred.n_layer', type=int, default=None)\n parser.add_argument('--obs_pred.n_head', type=int, default=None)\n parser.add_argument('--obs_pred.K', type=int, default=None)\n parser.add_argument('--obs_pred.transformer_tune_style', type=str, default=None,\n choices=['all', 'last_block', 'linear_probe', 'none'])\n\n parser.add_argument('--inv_d_pred.n_layer', type=int, default=None)\n parser.add_argument('--inv_d_pred.n_head', type=int, default=None)\n parser.add_argument('--inv_d_pred.K', type=int, default=None)\n parser.add_argument('--inv_d_pred.transformer_tune_style', type=str, default=None,\n choices=['all', 'last_block', 'linear_probe', 'none'])\n ### This applies only to transformer-based models\n add_boolean_arg(parser, 'relative_position_encodings', true='--relative_position_encodings', false='--absolute_position_encodings', default=True)\n parser.add_argument('--action_output_type', type=str, default='deterministic',\n choices=['deterministic', 'gaussian', 'gaussian_mixture'])\n\n # Image encoder\n parser.add_argument('--image_encoder_arch', type=str, default='resnet18')\n parser.add_argument('--image_encoder_load', type=str, default=None)\n parser.add_argument('--pool_type', type=str, default='SpatialSoftmax')\n parser.add_argument('--image_encoder_tune_style', type=str, default='all') # none, fc, lastN (N an integer), or all\n\n # Data\n parser.add_argument('--data_dir', type=str, default='~/data')\n # --camera_names can have a special value FULL_STATE.\n # FULL_STATE means that the agent should use the full_state field returned by the data/env, and 
should *not* use proprio states.\n # In this case, the encoder is automatically set to be a linear layer mapping the full state dimentsion to the model's hidden dimnesion.\n # The image_size should then have the size M,1 or 1,N, where M or N are the length of the full state vectors.\n parser.add_argument('--camera_names', type=str, default='agentview') # E.g., --camera_names=agentview,robot0_eye_in_hand\n # If --image_size is a single number N, the image is interpreted to be of dimensions N x N.\n # If it is two numbers -- M,N -- the image is interpreted to have height M and width N.\n # NOTE: If --image_size is two numbers -- M,N as above -- and either M or N is 1, the image contents are interpreted as\n # an image embedding vector. The --image_encoder_arch is then ignored, and the encoder is automatically set to be a linear layer mapping\n # the embedding dimension to the model's hidden dimnesion.\n parser.add_argument('--image_size', type=int, default=84)\n # Frames-per-second for the desired frame rate (usually, a target task's). The default is to ignore frame rates.\n parser.add_argument('--target_frame_rate', type=int, default=None)\n add_boolean_arg(parser, 'pad_frame_gaps', default=True,\n true='--pad_frame_gaps', false='--copy_into_frame_gaps')\n # Dynamics and action spaces are generally problem-specific, so we use robot-specifc data for them, as well as for validation tasks.\n parser.add_argument('--robot', type=str, default=None)\n\n # Training\n parser.add_argument('--batch_size', type=int, default=32)\n parser.add_argument('--max_iters', type=int, default=10)\n parser.add_argument('--warmup_steps', type=int, default=100)\n parser.add_argument('--weight_decay', type=float, default=1e-4)\n\n # Evaluation\n # What fraction of the top demo trajectory returns will be used during evaluation?\n # NOTE: this parameter is relevant only if we are in the offline RL mode, not BC mode.\n parser.add_argument('--top_return_fraction', type=float, default=1.0)\n parser.add_argument('--best_metric', type=str, default='evaluation/neg_val_error', choices=['evaluation/neg_val_error', 'evaluation/success_rate'])\n # NOTE: during pretraining, --validation_frac applies *only* to tasks specified by --validation_tasks,\n # and specify the fraction of these tasks' trajectrories that will be used for validation.\n #\n # NOTE: The remaining validation trajectories of these tasks will be used for pretraining.\n # I.e., if you want all data of the --validation_tasks to be used only for validation and none for pretraining,\n # set --validation_frac=1.0 (or just don't specify --validation_frac at all, since 1.0 is the default).\n #\n # During finetuning, --validation_frac applies only to --target_task, and --validation_tasks must be None.\n #\n # NOTE: If during finetuning --best_metric is evaluation/success_rate (i.e., success rate),\n # --validation_frac is ignored and all of --target_task's trajectories are used for training. 
In this case,\n # validation loss isn't computed.\n #\n # NOTE: the following parameters are relevant only if best_metric is negative validation error.\n parser.add_argument('--validation_frac', type=float, default=1.0)\n parser.add_argument('--validation_samples', type=int, default=100) # how many sample batches on which to measure error\n # NOTE: the following parameters are relevant only if best_metric is success rate.\n parser.add_argument('--max_eval_episode_len', type=int, default=500)\n parser.add_argument('--num_eval_episodes', type=int, default=10)\n parser.add_argument('--num_eval_workers', type=int, default=5)\n parser.add_argument('--min_time_at_goal_for_success', type=int, default=5) # Minimum number of consecutive time steps an agent should spend at a goal state during an evaluation episode for the episode to terminate with a success.\n parser.add_argument('--record_camera', type=str, default=None)\n add_boolean_arg(parser, 'record_video', true='--record_video', false='--no_video', default=False)" }, { "identifier": "add_conditioning_args", "path": "PLEX/util/cmdline.py", "snippet": "def add_conditioning_args(parser):\n # Chooses between behavior cloning mode (the default, involves conditioning only on a goal, if available)\n # and offline RL mode (involves conditioning on a goal, if available, and on a return).\n add_boolean_arg(parser, 'bc_learning_mode', true='--bc_learning_mode', false='--orl_learning_mode', default=True)\n parser.add_argument('--context_style', type=str, default='first-success')\n add_boolean_arg(parser, 'context_from_same_traj', true='--context_from_same_traj', false='--context_from_diff_traj', default=False)\n # reward_type can be 'native', 'negative', 'random', 'zero', or 'sparse'.\n parser.add_argument('--reward_type', type=str, default='native')\n add_boolean_arg(parser, 'normalize_reward', true='--normalize_reward', false='--use_raw_reward', default=False)\n parser.add_argument('--discount', type=float, default=0.99)" }, { "identifier": "setup_wandb_logging", "path": "PLEX/util/log.py", "snippet": "def setup_wandb_logging(group_name, cmdline_args):\n exp_prefix = f'{group_name}_{random.randint(int(1e5), int(1e6) - 1)}'\n wandb.init(\n name=exp_prefix,\n group=group_name,\n project='PLEX',\n config=cmdline_args\n )\n # wandb.watch(model) # wandb has some bug" } ]
import os import torch import argparse import sys from PLEX.util.data import TrajectoryDataset, load_data, setup_batch_sampler, train_val_split from PLEX.util.misc import parse_tasks, setup_essentials, setup_model, set_trainable_params, setup_trainer, run_training from PLEX.util.evaluators import get_success_rate_evaluator, get_validation_error_evaluator from PLEX.util.cmdline import add_common_args, add_conditioning_args from PLEX.util.log import setup_wandb_logging
11755
def finetune(cmdline_args): os.environ["NCCL_DEBUG"] = "INFO" print("=== Finetuning ===") parser = argparse.ArgumentParser() # Add all relevant command-line arguments add_common_args(parser) add_conditioning_args(parser) parser.add_argument('--finetune_learning_rate', type=float, default=1e-5) parser.add_argument('--finetune_steps_per_iter', type=int, default=100) parser.add_argument('--target_task', type=str, default=None) parser.add_argument('--max_target_trajectories', type=int, default=None) # Parse them and validate them args = parser.parse_args(cmdline_args) args = vars(args) if not args['bc_learning_mode']: assert 'reward' not in args['modalities_to_mask'], "If the model is expected to condition on returns, then they should not be masked out." # NOTE: The arguments below aren't actual command-line arguments. We are just addeing them to args[] out of convenience. # Note also that during finetuning we set predicted_inverse_dynamics_loss_weight=1, i.e., **in case the # finetuning trajectories contain actions**, we adapt PLEX's based on the predicted observation latents # from it planner PL rather than based on the actual ("grounded") observation latents contained # in finetuning trajectories. if args['model'] == 'PLEX': args['grounded_inverse_dynamics_loss_weight'] = 0 args['predicted_inverse_dynamics_loss_weight'] = 1 args['future_prediction_loss_weight'] = 1 log, log_to_wandb, timer, data_shuffling_rng, device, camera_names, modalities_to_mask, data_dir, common_env_metadata_dict = setup_essentials(args) # NOTE: common_env_metadata_dict may be modified by the calls to load_data below. # Load data: target-task trajectories target_tasks, target_max_trajs = parse_tasks(args['target_task'], args['robot'], args['max_target_trajectories']) target_task = target_tasks[0] data = load_data(log, data_dir, target_tasks, max_trajectories=target_max_trajs, discount=args['discount'], camera_names=camera_names, image_size=args['image_size'], target_frame_rate=args['target_frame_rate'], normalize_rewards=args['normalize_reward'], reward_type=args['reward_type'], common_env_metadata_dict=common_env_metadata_dict, data_shuffling_rng=data_shuffling_rng) assert len(data.keys()) == 1, f"There should be only one target task. Discovered {len(data.keys())}: {data.keys()}" #assert args['validation_tasks'] is None, f"Validation tasks other than the target tasks aren't used during finetuning and were likely specified erroneously: {args['validation_tasks']}." # Train/test split # NOTE: we don't actually need create the split if args['best_metric'] == 'evaluation/success_rate' if args['best_metric'] == 'evaluation/success_rate': print("WARNING: since the evaluation metric is success rate, the training-validation split of the target task data will be ignored, and all target-task trajectories will be used for training.") train_trajectories, val_trajectories = train_val_split(data[target_task.name], args['validation_frac'])
def finetune(cmdline_args): os.environ["NCCL_DEBUG"] = "INFO" print("=== Finetuning ===") parser = argparse.ArgumentParser() # Add all relevant command-line arguments add_common_args(parser) add_conditioning_args(parser) parser.add_argument('--finetune_learning_rate', type=float, default=1e-5) parser.add_argument('--finetune_steps_per_iter', type=int, default=100) parser.add_argument('--target_task', type=str, default=None) parser.add_argument('--max_target_trajectories', type=int, default=None) # Parse them and validate them args = parser.parse_args(cmdline_args) args = vars(args) if not args['bc_learning_mode']: assert 'reward' not in args['modalities_to_mask'], "If the model is expected to condition on returns, then they should not be masked out." # NOTE: The arguments below aren't actual command-line arguments. We are just addeing them to args[] out of convenience. # Note also that during finetuning we set predicted_inverse_dynamics_loss_weight=1, i.e., **in case the # finetuning trajectories contain actions**, we adapt PLEX's based on the predicted observation latents # from it planner PL rather than based on the actual ("grounded") observation latents contained # in finetuning trajectories. if args['model'] == 'PLEX': args['grounded_inverse_dynamics_loss_weight'] = 0 args['predicted_inverse_dynamics_loss_weight'] = 1 args['future_prediction_loss_weight'] = 1 log, log_to_wandb, timer, data_shuffling_rng, device, camera_names, modalities_to_mask, data_dir, common_env_metadata_dict = setup_essentials(args) # NOTE: common_env_metadata_dict may be modified by the calls to load_data below. # Load data: target-task trajectories target_tasks, target_max_trajs = parse_tasks(args['target_task'], args['robot'], args['max_target_trajectories']) target_task = target_tasks[0] data = load_data(log, data_dir, target_tasks, max_trajectories=target_max_trajs, discount=args['discount'], camera_names=camera_names, image_size=args['image_size'], target_frame_rate=args['target_frame_rate'], normalize_rewards=args['normalize_reward'], reward_type=args['reward_type'], common_env_metadata_dict=common_env_metadata_dict, data_shuffling_rng=data_shuffling_rng) assert len(data.keys()) == 1, f"There should be only one target task. Discovered {len(data.keys())}: {data.keys()}" #assert args['validation_tasks'] is None, f"Validation tasks other than the target tasks aren't used during finetuning and were likely specified erroneously: {args['validation_tasks']}." # Train/test split # NOTE: we don't actually need create the split if args['best_metric'] == 'evaluation/success_rate' if args['best_metric'] == 'evaluation/success_rate': print("WARNING: since the evaluation metric is success rate, the training-validation split of the target task data will be ignored, and all target-task trajectories will be used for training.") train_trajectories, val_trajectories = train_val_split(data[target_task.name], args['validation_frac'])
target_all_data = TrajectoryDataset(data[target_task.name], camera_names, contextual=True)
0
2023-11-06 09:38:09+00:00
16k
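As a reading aid for the row above: finetune() is driven entirely by command-line flags defined in add_common_args, add_conditioning_args, and its own parser. The sketch below is editor-added illustration, not part of the dataset row; the flag names come from those snippets, but the robot name, task spec, and paths are made-up placeholders, and actually running it requires the corresponding trajectory data on disk.

from PLEX.finetuning import finetune

# Flag names are taken from the parsers shown in this row; values are illustrative placeholders.
finetune([
    '--model', 'PLEX',
    '--robot', 'Sawyer',                                      # placeholder robot name
    '--target_task', 'robosuite/--TARGET_ROBOT--_PickPlace',  # placeholder task dir; must contain the --TARGET_ROBOT-- token
    '--max_target_trajectories', '25',
    '--data_dir', '~/data',
    '--log_dir', '~/logs',
    '--camera_names', 'agentview',
    '--image_size', '84',
    '--obs_pred.K', '30', '--obs_pred.n_layer', '3', '--obs_pred.n_head', '4',
    '--inv_d_pred.K', '30', '--inv_d_pred.n_layer', '3', '--inv_d_pred.n_head', '4',
    '--obs_pred.transformer_tune_style', 'all',
    '--inv_d_pred.transformer_tune_style', 'all',
    '--embed_dim', '128',                                      # must be a multiple of n_head
    '--best_metric', 'evaluation/neg_val_error',
    '--finetune_learning_rate', '1e-5',
    '--finetune_steps_per_iter', '100',
    '--batch_size', '32',
    '--max_iters', '10',
])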
Giftify-Bot/Giftify-Bot
bot.py
[ { "identifier": "GuildConfig", "path": "models/giveaway_settings.py", "snippet": "class GuildConfig:\n \"\"\"Represents the configuration settings for a guild.\n\n Parameters\n ----------\n guild: discord.Guild\n The guild associated with the configuration.\n logging: Optional[discord.TextChannel]\n The logging text channel for the guild.\n ping: Optional[discord.Role]\n The role to ping for notifications.\n reaction: str\n The reaction used for giveaways.\n participants_reaction,: str\n The reaction used for giveaways participants button.\n required_roles: List[discord.Role]\n The default roles required to join giveaway.\n blacklisted_roles: List[discord.Role]\n The default roles blacklisted from joining a giveaway.\n bypass_roles: List[discord.Role]\n The roles that bypass_roles certain restrictions.\n multiplier_roles: Dict[discord.Role, int]\n The multiplier_roles points assigned to each role.\n managers: List[discord.Role]\n The roles with manager permissions.\n dm_winner: bool\n Whether to send a direct message to the winner.\n dm_host: bool\n Whether to send a direct message to the host.\n channel_settings: List[ChannelConfig]\n The settings for each channel.\n color: discord.Colour\n The color used for messages.\n button_style: discord.ButtonStyle\n The style of the button.\n end_message: str\n The message sent when a giveaway ends.\n reroll_message: str\n The message sent when a giveaway rerolls.\n dm_message: str\n The direct message sent to winner.\n dm_host_message: str\n The direct message sent to host.\n gw_header: str\n The header for the giveaway message.\n gw_end_header: str\n The header for the giveaway end.\n \"\"\"\n\n __slots__: Tuple[str, ...] = (\n \"guild\",\n \"logging\",\n \"ping\",\n \"reaction\",\n \"participants_reaction\",\n \"required_roles\",\n \"blacklisted_roles\",\n \"bypass_roles\",\n \"multiplier_roles\",\n \"managers\",\n \"dm_winner\",\n \"dm_host\",\n \"channel_settings\",\n \"color\",\n \"button_style\",\n \"end_message\",\n \"reroll_message\",\n \"dm_message\",\n \"dm_host_message\",\n \"gw_header\",\n \"gw_end_header\",\n )\n\n def __init__(\n self,\n guild: discord.Guild,\n *,\n logging: Optional[discord.TextChannel],\n ping: Optional[discord.Role],\n reaction: str,\n participants_reaction: str,\n required_roles: List[discord.Role],\n blacklisted_roles: List[discord.Role],\n bypass_roles: List[discord.Role],\n multiplier_roles: Dict[discord.Role, int],\n managers: List[discord.Role],\n dm_winner: bool,\n dm_host: bool,\n channel_settings: List[ChannelConfig],\n color: discord.Colour,\n button_style: discord.ButtonStyle,\n end_message: str,\n reroll_message: str,\n dm_message: str,\n dm_host_message: str,\n gw_header: str,\n gw_end_header: str,\n ):\n self.guild = guild\n self.logging = logging\n self.ping = ping\n self.reaction = reaction\n self.participants_reaction = participants_reaction\n self.required_roles = required_roles\n self.blacklisted_roles = blacklisted_roles\n self.bypass_roles = bypass_roles\n self.multiplier_roles = multiplier_roles\n self.managers = managers\n self.dm_winner = dm_winner\n self.dm_host = dm_host\n self.channel_settings = channel_settings\n self.color = color\n self.button_style = button_style\n self.end_message = end_message\n self.reroll_message = reroll_message\n self.dm_host_message = dm_host_message\n self.dm_message = dm_message\n self.gw_header = gw_header\n self.gw_end_header = gw_end_header\n\n def __repr__(self):\n return f\"<GuildConfig guild={self.guild!r}>\"\n\n @staticmethod\n async def 
_create_config(guild_id: int, pool: asyncpg.Pool) -> asyncpg.Record:\n return await pool.fetchrow(\n \"INSERT INTO configs (guild) VALUES ($1) RETURNING *\",\n guild_id,\n )\n\n @classmethod\n def _from_data(\n cls,\n guild: discord.Guild,\n data: asyncpg.Record,\n channel_data: List[asyncpg.Record],\n ) -> \"GuildConfig\":\n data = dict(data)\n data[\"color\"] = discord.Colour(data[\"color\"])\n\n data[\"logging\"] = guild.get_channel(data[\"logging\"])\n data[\"ping\"] = guild.get_role(data[\"ping\"])\n data[\"required_roles\"] = [\n guild.get_role(role) for role in data[\"required_roles\"] if role is not None\n ]\n data[\"blacklisted_roles\"] = [\n guild.get_role(role)\n for role in data[\"blacklisted_roles\"]\n if role is not None\n ]\n data[\"bypass_roles\"] = [\n guild.get_role(role) for role in data[\"bypass_roles\"] if role is None\n ]\n data[\"multiplier_roles\"] = {\n guild.get_role(role): multiplier\n for role, multiplier in data[\"multiplier_roles\"].items()\n if role is not None and multiplier > 1\n }\n data[\"managers\"] = [\n guild.get_role(role) for role in data[\"managers\"] if role is not None\n ]\n\n data[\"button_style\"] = discord.utils.get(\n discord.ButtonStyle, value=data[\"button_style\"]\n )\n\n data[\"channel_settings\"] = [\n channel_setting\n for record in channel_data\n if (channel_setting := ChannelConfig.from_data(guild, record))\n ]\n\n data.pop(\"guild\") # We do not need this.\n\n return cls(guild, **data)\n\n def to_dict(self) -> GuildConfigData:\n \"\"\"Converts this GuildConfig object into a dict.\"\"\"\n\n data = GuildConfigData(\n guild=self.guild.id,\n reaction=self.reaction,\n participants_reaction=self.participants_reaction,\n required_roles=[\n role.id for role in self.required_roles if role is not None\n ],\n blacklisted_roles=[\n role.id for role in self.blacklisted_roles if role is not None\n ],\n bypass_roles=[role.id for role in self.bypass_roles if role is not None],\n multiplier_roles={\n role.id: multiplier_roles\n for role, multiplier_roles in self.multiplier_roles.items()\n if role is not None\n },\n managers=[role.id for role in self.managers if role is not None],\n dm_winner=self.dm_winner,\n dm_host=self.dm_host,\n color=int(self.color),\n button_style=self.button_style.value,\n end_message=self.end_message,\n reroll_message=self.reroll_message,\n dm_message=self.dm_message,\n dm_host_message=self.dm_host_message,\n gw_header=self.gw_header,\n gw_end_header=self.gw_end_header,\n ) # type: ignore\n if self.logging:\n data[\"logging\"] = self.logging.id\n if self.ping:\n data[\"ping\"] = self.ping.id\n return data\n\n @classmethod\n async def fetch(cls, guild: discord.Guild, pool: asyncpg.Pool) -> \"GuildConfig\":\n \"\"\"Create a GuildConfig instance from data retrieved from a database.\n\n Parameters\n ----------\n guild: discord.Guild\n The discord guild.\n pool: asyncpg.Pool\n The database connection pool.\n\n Returns\n -------\n GuildConfig\n An instance of GuildConfig populated with the retrieved data.\n \"\"\"\n\n data = await pool.fetchrow(\"SELECT * FROM configs WHERE guild = $1\", guild.id)\n channel_data: List[asyncpg.Record] = await pool.fetch(\n \"SELECT * FROM channel_configs WHERE guild = $1\", guild.id\n )\n\n if not data:\n data: asyncpg.Record = await cls._create_config(guild.id, pool)\n\n return cls._from_data(guild, data, channel_data)\n\n async def update(\n self, column: str, value: Any, pool: asyncpg.Pool\n ) -> \"GuildConfig\":\n \"\"\"Update the specified column with the provided value in the database.\n\n 
Parameters\n ----------\n column: str\n The column to be updated.\n value: Any\n The new value for the column.\n pool: asyncpg.Pool\n The database connection pool.\n\n Raises\n ------\n ValueError\n If the provided column is not a valid column name in `self.__slots__`.\n\n Returns\n -------\n GuildConfig\n The updated `GuildConfig` instance.\n \"\"\"\n if column not in self.__slots__:\n raise ValueError(f\"Invalid column: {column}\")\n\n setattr(self, column, value)\n\n data = self.to_dict()\n\n columns = \", \".join(data.keys())\n placeholders = \", \".join([f\"${i+1}\" for i in range(len(data))])\n update_clause = \", \".join(\n [f\"{key} = EXCLUDED.{key}\" for key in data.keys() if key != \"guild\"]\n )\n\n query = f\"\"\"\n INSERT INTO configs ({columns}) \n VALUES ({placeholders})\n ON CONFLICT (guild) DO \n UPDATE SET {update_clause}\n \"\"\"\n\n values = list(data.values())\n await pool.execute(query, *values)\n return self\n\n @overload\n async def get_channel_config(\n self,\n channel: Union[discord.TextChannel, discord.CategoryChannel],\n create_if_not_exists: bool = True,\n pool: Optional[asyncpg.Pool] = None,\n ) -> ChannelConfig:\n ...\n\n @overload\n async def get_channel_config(\n self,\n channel: Union[discord.TextChannel, discord.CategoryChannel],\n create_if_not_exists: bool = False,\n pool: Optional[asyncpg.Pool] = None,\n ) -> Optional[ChannelConfig]:\n ...\n\n async def get_channel_config(\n self,\n channel: Union[discord.TextChannel, discord.CategoryChannel],\n create_if_not_exists: bool = True,\n pool: Optional[asyncpg.Pool] = None,\n ) -> Optional[ChannelConfig]:\n \"\"\"\n Retrieves the configuration for a specific channel.\n\n Parameters\n ----------\n channel: Union[discord.TextChannel, discord.CategoryChannel]\n The channel for which to retrieve the configuration.\n create_if_not_exists: Optional[bool]\n Whether to create a new configuration if it doesn't exist. 
Default is True.\n pool: Optional[asyncpg.Pool]\n The connection pool for interacting with the database.\n\n Returns\n -------\n Optional[ChannelConfig]\n The ChannelConfig object if it exists, or None if it doesn't exist and create_if_not_exists is set to False.\n\n Raises\n ------\n MaxChannelConfigCreationError\n If create_if_not_exists is True and the maximum number of channel configurations has already been reached.\n \"\"\"\n\n config = discord.utils.get(self.channel_settings, channel=channel)\n if config is not None:\n return config\n\n if create_if_not_exists:\n if len(self.channel_settings) >= 25:\n raise MaxChannelConfigCreationError()\n else:\n if pool:\n config = await ChannelConfig.create(channel.guild, channel, pool)\n self.channel_settings.append(config)\n return config\n\n return None" }, { "identifier": "Giveaway", "path": "models/giveaways.py", "snippet": "class Giveaway:\n \"\"\"\n Represents a giveaway object.\n\n Attributes\n ----------\n bot: Giftify\n The bot instance to handle the giveaway.\n guild_id: int\n The ID of the guild (server) where the giveaway is hosted.\n channel_id: int\n The ID of the channel where the giveaway is hosted.\n message_id: int\n The ID of the giveaway message.\n extra_message_id: int\n The ID of the extra message with giveaway.\n host_id: int\n The ID of the user hosting the giveaway.\n donor_id: int\n The ID of the user donating for the giveaway.\n prize: int\n The prize of the giveaway.\n winner_count: int\n The number of winners for the giveaway.\n winners: List[int]\n The winners of the giveaway.\n participants: List[int]\n The IDs participants for the giveaway.\n ended: bool\n Indicates whether the giveaway has ended.\n ends: datetime.datetime\n The timestamp when the giveaway will be ended.\n required_roles: List[int]\n The list of role IDs required to participate in the giveaway.\n blacklisted_roles: List[int]\n The list of role IDs excluded from participating in the giveaway.\n bypass_roles: List[int]\n The list of user IDs exempted from giveaway restrictions.\n multiplier_roles: Optional[dict]\n A dictionary containing multiplier_roles criteria for the giveaway.\n messages: Optional[dict]\n A dictionary containing message-based criteria for the giveaway.\n messages_required: Optional[int]\n The number of messages required to participate in the giveaway.\n allowed_message_channels: Optional[List[int]]\n The ID of the channels where the message count is tracked.\n amari: Optional[int]\n The required Amari XP to participate in the giveaway.\n weekly_amari: Optional[int]\n The required weekly Amari XP to participate in the giveaway.\n \"\"\"\n\n __slots__ = (\n \"bot\",\n \"guild_id\",\n \"channel_id\",\n \"message_id\",\n \"extra_message_id\",\n \"prize\",\n \"host_id\",\n \"donor_id\",\n \"winner_count\",\n \"winners\",\n \"participants\",\n \"ended\",\n \"ends\",\n \"required_roles\",\n \"blacklisted_roles\",\n \"bypass_roles\",\n \"multiplier_roles\",\n \"messages\",\n \"messages_required\",\n \"allowed_message_channels\",\n \"amari\",\n \"weekly_amari\",\n )\n\n def __init__(self, *, bot: Giftify, record: asyncpg.Record):\n self.bot = bot\n self.guild_id: int = record[\"guild\"]\n self.channel_id: int = record[\"channel\"]\n self.message_id: int = record[\"message\"]\n self.extra_message_id: int = record[\"extra_message\"]\n self.prize: str = record[\"prize\"]\n self.host_id: int = record[\"host\"]\n self.donor_id: Optional[int] = record[\"donor\"]\n self.winner_count: int = record[\"winner_count\"]\n self.winners: List[int] = 
record[\"winners\"]\n self.participants: List[int] = record[\"participants\"]\n self.ended: bool = record[\"ended\"]\n self.ends: datetime.datetime = record[\"ends\"]\n self.required_roles: List[int] = record[\"required_roles\"] or []\n self.blacklisted_roles: List[int] = record[\"blacklisted_roles\"] or []\n self.bypass_roles: List[int] = record[\"bypass_roles\"] or []\n self.multiplier_roles: Dict[int, int] = {\n int(role): entries\n for role, entries in record[\"multiplier_roles\"].items()\n if entries > 1\n }\n self.messages: Dict[int, int] = {\n int(member): messages for member, messages in record[\"messages\"].items()\n }\n self.messages_required: Optional[int] = record[\"messages_required\"]\n self.allowed_message_channels: Optional[List[int]] = record[\"messages_channel\"]\n self.amari: Optional[int] = record[\"amari\"]\n self.weekly_amari: Optional[int] = record[\"weekly_amari\"]\n\n def __eq__(self, other: \"Giveaway\") -> bool:\n try:\n return (\n self.guild_id == other.guild_id\n and self.channel_id == other.channel_id\n and self.message_id == other.message_id\n )\n except AttributeError:\n return False\n\n def __hash__(self) -> int:\n return hash((self.guild_id, self.channel_id, self.message_id))\n\n def __repr__(self) -> str:\n return f\"<Giveaway guild_id={self.guild_id} channel_id={self.channel_id} message_id={self.message_id}>\"\n\n @property\n def jump_to_giveaway(self) -> discord.ui.View:\n url = f\"https://discord.com/channels/{self.guild_id}/{self.channel_id}/{self.message_id}\"\n view = BaseView(timeout=None)\n button = discord.ui.Button(label=\"Jump To Giveaway\", url=url)\n view.add_item(button)\n return view\n\n @staticmethod\n def create_embed(\n interaction: Interaction,\n config: GuildConfig,\n duration: datetime.datetime,\n winners: int,\n prize: str,\n required_roles: Optional[List[discord.Role]] = None,\n blacklisted_roles: Optional[List[discord.Role]] = None,\n bypass_roles: Optional[List[discord.Role]] = None,\n multiplier_roles: Optional[Dict[discord.Role, int]] = None,\n messages_required: Optional[int] = None,\n allowed_message_channels: Optional[List[discord.TextChannel]] = None,\n amari: Optional[int] = None,\n weekly_amari: Optional[int] = None,\n donor: Optional[discord.Member] = None,\n ) -> discord.Embed:\n assert interaction.guild is not None\n\n description = f\"Click the {config.reaction} button to join the giveaway!\\n\"\n description += f\"Hosted By: {interaction.user.mention}\\n\"\n\n if donor:\n description += f\"Donor: {donor.mention}\\n\"\n\n description += f\"Ends: {discord.utils.format_dt(duration, style='R')} ({discord.utils.format_dt(duration, style='f')})\\n\"\n\n embed = discord.Embed(\n title=prize,\n description=description,\n colour=config.color,\n timestamp=duration,\n )\n embed.set_footer(\n text=f\"{winners} winner(s) • Ends\",\n icon_url=interaction.guild.icon or interaction.client.user.display_avatar,\n )\n requirements = \"\"\n if required_roles:\n requirements += f\"Required Roles: {', '.join(role.mention for role in required_roles if role is not None)}\\n\"\n if bypass_roles:\n requirements += f\"Bypass Roles: {', '.join(role.mention for role in bypass_roles if role is not None)}\\n\"\n\n if blacklisted_roles:\n requirements += f\"Blacklisted Roles: {', '.join(role.mention for role in blacklisted_roles if role is not None)}\\n\"\n if messages_required:\n requirements += (\n f\"Messages Required: **{messages_required}** message(s) (5s cooldown)\\n\"\n )\n if allowed_message_channels:\n requirements += f\"Allowed Channels: 
{', '.join(f'<#{c.id}>' for c in allowed_message_channels)}\\n\"\n\n if amari:\n requirements += f\"Amari Level: {amari}\\n\"\n if weekly_amari:\n requirements += f\"Weekly Amari: {weekly_amari} XP Points\\n\"\n\n if requirements:\n embed.add_field(name=\"Requirements\", value=requirements, inline=False)\n\n if multiplier_roles:\n multiplier_roles_mention = \"\\n\".join(\n [\n f\"- {entry}x ・ {role.mention}\"\n for role, entry in multiplier_roles.items()\n if role is not None\n ]\n )\n embed.add_field(\n name=\"Bonus Entries\", value=multiplier_roles_mention, inline=False\n )\n\n return embed\n\n @classmethod\n async def start(\n cls,\n interaction: Interaction,\n duration: datetime.datetime,\n winners: int,\n prize: str,\n config: GuildConfig,\n channel_config: Optional[ChannelConfig],\n required_roles: Optional[List[discord.Role]] = None,\n blacklisted_roles: Optional[List[discord.Role]] = None,\n bypass_roles: Optional[List[discord.Role]] = None,\n multiplier_roles: Optional[Dict[discord.Role, int]] = None,\n messages_required: Optional[int] = None,\n allowed_message_channels: Optional[List[discord.TextChannel]] = None,\n amari: Optional[int] = None,\n weekly_amari: Optional[int] = None,\n image: Optional[discord.Attachment] = None,\n donor: Optional[discord.Member] = None,\n ping: bool = False,\n message: Optional[str] = None,\n ):\n assert isinstance(interaction.channel, discord.TextChannel)\n assert interaction.guild is not None\n\n embed = cls.create_embed(\n interaction=interaction,\n config=config,\n duration=duration,\n winners=winners,\n prize=prize,\n required_roles=required_roles,\n blacklisted_roles=blacklisted_roles,\n bypass_roles=bypass_roles,\n multiplier_roles=multiplier_roles,\n messages_required=messages_required,\n allowed_message_channels=allowed_message_channels,\n amari=amari,\n weekly_amari=weekly_amari,\n donor=donor,\n )\n view = GiveawayView(\n config.reaction, config.participants_reaction, config.button_style\n )\n giveaway_message = await interaction.channel.send(\n config.gw_header, embed=embed, view=view\n )\n\n message_embed = discord.Embed(\n title=f\"{GIFT_EMOJI} Giveaway\",\n description=f\"**Message・** {message}\" if message else None,\n color=config.color,\n )\n\n if image:\n message_embed.set_image(url=image)\n\n extra_message = None\n\n if ping or image:\n ping_role = (\n channel_config.ping\n if channel_config and channel_config.ping\n else config.ping\n )\n extra_message = await interaction.channel.send(\n ping_role.mention if ping_role else \"\",\n embed=message_embed if message or image else None, # type: ignore\n allowed_mentions=discord.AllowedMentions(roles=True),\n )\n\n if extra_message is None and message is not None:\n extra_message = await interaction.channel.send(embed=message_embed)\n\n await interaction.client.timer_cog.create_timer(\n message_id=giveaway_message.id,\n channel_id=interaction.channel.id,\n guild_id=interaction.guild.id,\n author_id=interaction.user.id,\n title=\"Giveaway\",\n event=\"giveaway\",\n expires=duration,\n pool=interaction.client.pool,\n )\n\n return await cls.create_entry(\n bot=interaction.client,\n guild_id=interaction.guild.id,\n channel_id=interaction.channel.id,\n message_id=giveaway_message.id,\n prize=prize,\n host_id=interaction.user.id,\n donor_id=donor.id if donor else None,\n winner_count=winners,\n ends=duration,\n required_roles=[role.id for role in required_roles if role is not None]\n if required_roles\n else [],\n blacklisted_roles=[\n role.id for role in blacklisted_roles if role is not 
None\n ]\n if blacklisted_roles\n else [],\n bypass_roles=[role.id for role in bypass_roles if role is not None]\n if bypass_roles\n else [],\n multiplier_roles={\n role.id: entries\n for role, entries in multiplier_roles.items()\n if role is not None\n }\n if multiplier_roles\n else {},\n messages={},\n messages_required=messages_required,\n allowed_message_channels=[c.id for c in allowed_message_channels]\n if allowed_message_channels\n else [],\n extra_message_id=extra_message.id if extra_message else None,\n amari=amari,\n weekly_amari=weekly_amari,\n )\n\n @classmethod\n async def create_entry(\n cls,\n bot: Giftify,\n guild_id: int,\n channel_id: int,\n message_id: int,\n prize: str,\n host_id: int,\n winner_count: int,\n ends: datetime.datetime,\n required_roles: List[int],\n blacklisted_roles: List[int],\n bypass_roles: List[int],\n donor_id: Optional[int],\n multiplier_roles: Optional[dict],\n messages: Optional[dict],\n messages_required: Optional[int],\n allowed_message_channels: Optional[List[int]],\n extra_message_id: Optional[int],\n amari: Optional[int],\n weekly_amari: Optional[int],\n ) -> \"Giveaway\":\n \"\"\"\n Create a new Giveaway object and insert it into the database.\n\n Parameters\n ----------\n bot: Giftify\n The bot instance.\n guild_id: int\n The ID of the guild (server) where the giveaway is hosted.\n channel_id: int\n The ID of the channel where the giveaway is hosted.\n message_id: int\n The ID of the message having the giveaway view.\n prize: str\n The prize of the giveaway.\n host_id: int\n The ID of the user hosting the giveaway.\n donor_id: int\n The ID of the donor of the giveaway.\n winner_count: int\n The number of winners for the giveaway.\n ends: datetime.datetime\n The time when the giveaway ends.\n required_roles: List[int]\n The list of role IDs required to participate in the giveaway.\n blacklisted_roles: List[int]\n The list of role IDs excluded from participating in the giveaway.\n bypass_roles: List[int]\n The list of user IDs exempted from giveaway restrictions.\n multiplier_roles: Optional[dict]\n A dictionary containing multiplier_roles criteria for the giveaway.\n messages: Optional[dict]\n A dictionary containing message-based criteria for the giveaway.\n messages_required: Optional[int]\n The number of messages required to participate in the giveaway.\n allowed_message_channels: Optional[int]\n The ID of the channel where the message count is tracked.\n amari: Optional[int]\n The required Amari XP to participate in the giveaway.\n weekly_amari: Optional[int]\n The required weekly Amari XP to participate in the giveaway.\n\n Returns\n -------\n Giveaway\n The created Giveaway object.\n \"\"\"\n record = await bot.pool.fetchrow(\n \"INSERT INTO giveaways (guild, channel, message, extra_message, host, donor, prize, winner_count, ends, required_roles, blacklisted_roles, bypass_roles, multiplier_roles, messages, messages_required, messages_channel, amari, weekly_amari) \"\n \"VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18) \"\n \"RETURNING *\",\n guild_id,\n channel_id,\n message_id,\n extra_message_id,\n host_id,\n donor_id,\n prize,\n winner_count,\n ends,\n required_roles,\n blacklisted_roles,\n bypass_roles,\n multiplier_roles,\n messages,\n messages_required,\n allowed_message_channels,\n amari,\n weekly_amari,\n )\n return cls(bot=bot, record=record)\n\n async def check_requirements(self, member: discord.Member) -> None:\n missing_roles = [\n role.mention\n for role_id in self.required_roles\n 
if (role := member.guild.get_role(role_id)) and role not in member.roles\n ]\n if missing_roles:\n raise GiveawayError(\n f\"You cannot join this giveaway as you are missing the following required roles: {', '.join(missing_roles)}\"\n )\n\n blacklisted_roles = [\n role.mention\n for role_id in self.blacklisted_roles\n if (role := member.guild.get_role(role_id)) and role in member.roles\n ]\n if blacklisted_roles:\n raise GiveawayError(\n f\"You cannot join this giveaway as you have the following blacklisted roles: {', '.join(blacklisted_roles)}\"\n )\n\n if self.amari:\n if (user_level := await self.bot.fetch_level(member)) < self.amari:\n raise GiveawayError(\n f\"Your amari level is less than the required level, you need `{self.amari - user_level}` more level(s) to join the giveaway.\"\n )\n\n if self.weekly_amari:\n if (\n weekly_exp := await self.bot.fetch_weekly_experience(member)\n ) < self.weekly_amari:\n raise GiveawayError(\n f\"Your weekly amari experience is less than the required weekly amari experience, you need `{self.weekly_amari - weekly_exp}` more experience point(s) to join the giveaway.\"\n )\n\n if self.messages_required and self.messages_required > 0:\n if (\n user_messages := self.messages.get(member.id, 0)\n ) < self.messages_required:\n raise GiveawayError(\n f\"You have sent less messages than the required messages, you need to send `{self.messages_required - user_messages}` more messages to join the giveaway.\"\n )\n\n def can_bypass(self, member: discord.Member) -> bool:\n return any(\n member.guild.get_role(role_id) in member.roles\n for role_id in self.bypass_roles\n )\n\n def get_multiplier_entries(self, member: discord.Member) -> int:\n entries = 0\n for role_id, multiplier_roles_entries in self.multiplier_roles.items():\n if member.get_role(int(role_id)):\n entries += multiplier_roles_entries\n\n return entries or 1\n\n async def join(self, member: discord.Member) -> int:\n try:\n await self.check_requirements(member)\n except GiveawayError as error:\n if not self.can_bypass(member):\n raise error\n\n if member.id in self.participants:\n raise GiveawayError(\"You have already joined the giveaway.\")\n\n number_of_entries = self.get_multiplier_entries(member)\n entries = [member.id] * number_of_entries\n\n self.participants += entries\n\n query = \"\"\"UPDATE giveaways SET participants = $1 \n WHERE guild = $2 AND channel = $3 AND message = $4\"\"\"\n\n await self.bot.pool.execute(\n query, self.participants, self.guild_id, self.channel_id, self.message_id\n )\n\n return len(set(self.participants))\n\n async def leave(self, member: discord.Member) -> int:\n if member.id not in self.participants:\n raise GiveawayError(\"You are not a participant of this giveaway.\")\n\n self.participants = [\n participant for participant in self.participants if participant != member.id\n ]\n\n query = \"\"\"UPDATE giveaways SET participants = $1 \n WHERE guild = $2 AND channel = $3 AND message = $4\"\"\"\n\n await self.bot.pool.execute(\n query, self.participants, self.guild_id, self.channel_id, self.message_id\n )\n\n return len(set(self.participants))\n\n async def _end(self):\n await self.bot.pool.execute(\n \"UPDATE giveaways SET ended = $1, winners = $2 WHERE guild = $3 AND channel = $4 AND message = $5\",\n True,\n self.winners,\n self.guild_id,\n self.channel_id,\n self.message_id,\n )\n\n async def end(self):\n guild = self.bot.get_guild(self.guild_id)\n if not guild:\n return await self._end()\n\n config = await self.bot.fetch_config(guild)\n winners = await 
self.pick_winners(self.winner_count, guild)\n self.winners = [winner.id for winner in winners]\n\n await self._end()\n\n if config.dm_host:\n await self.dm_host(guild, winners, config.dm_host_message)\n\n if config.dm_winner:\n await self.dm_winners(config.dm_message, winners)\n\n channel = guild.get_channel(self.channel_id)\n if not channel or not isinstance(channel, discord.TextChannel):\n return\n\n gw_message = channel.get_partial_message(self.message_id)\n message = (\n safe_format(\n config.end_message,\n winners=\", \".join(winner.mention for winner in winners),\n prize=bold(self.prize),\n )\n if winners\n else f\"Could not pick any winners for the giveaway of {bold(self.prize)}!\"\n )\n embed = self.get_end_embed(guild, config)\n\n view = GiveawayView(\n config.reaction,\n config.participants_reaction,\n config.button_style,\n participant_count=len(set(self.participants)),\n disabled=True,\n )\n\n with contextlib.suppress(discord.HTTPException):\n await gw_message.edit(content=config.gw_end_header, embed=embed, view=view)\n await gw_message.reply(message, view=self.jump_to_giveaway)\n\n async def reroll(self, winner_count: int):\n guild = self.bot.get_guild(self.guild_id)\n if not guild:\n return\n\n config = await self.bot.fetch_config(guild)\n winners = await self.pick_winners(winner_count, guild)\n self.winners = [winner.id for winner in winners]\n\n await self._end()\n\n if config.dm_winner:\n await self.dm_winners(config.dm_message, winners)\n\n channel = guild.get_channel(self.channel_id)\n if not channel or not isinstance(channel, discord.TextChannel):\n return\n\n gw_message = channel.get_partial_message(self.message_id)\n message = (\n safe_format(\n config.reroll_message,\n winners=\", \".join(winner.mention for winner in winners),\n prize=bold(self.prize),\n )\n if winners\n else f\"Could not pick any winners for the giveaway of {bold(self.prize)}!\"\n )\n embed = self.get_end_embed(guild, config)\n\n view = GiveawayView(\n config.reaction,\n config.participants_reaction,\n config.button_style,\n participant_count=len(set(self.participants)),\n disabled=True,\n )\n\n with contextlib.suppress(discord.HTTPException):\n await gw_message.edit(content=config.gw_end_header, embed=embed, view=view)\n await gw_message.reply(message, view=self.jump_to_giveaway)\n\n async def cancel(self):\n await self.bot.pool.execute(\n \"\"\"DELETE FROM giveaways WHERE guild = $1 AND channel = $2 AND message = $3\"\"\",\n self.guild_id,\n self.channel_id,\n self.message_id,\n )\n if self.extra_message_id is not None:\n channel = self.bot.get_channel(self.channel_id)\n if channel is not None:\n await channel.get_partial_message(self.extra_message_id).delete() # type: ignore\n\n async def dm_host(\n self, guild: discord.Guild, winners: List[discord.Member], message: str\n ) -> None:\n host = await self.bot.get_or_fetch_member(guild, self.host_id)\n if not host:\n return\n\n description = safe_format(\n message,\n winners=\", \".join(winner.mention for winner in winners)\n if winners\n else \"No Winners\",\n prize=bold(self.prize),\n )\n\n embed = discord.Embed(\n title=f\"Your giveaway for {self.prize} has ended!\"[:256],\n description=description,\n colour=self.bot.colour,\n )\n view = self.jump_to_giveaway\n\n with contextlib.suppress(discord.HTTPException):\n await host.send(embed=embed, view=view)\n\n async def dm_winners(self, message: str, winners: List[discord.Member]) -> None:\n for winner in winners:\n description = safe_format(\n message, winner=winner.mention, prize=bold(self.prize)\n 
)\n\n embed = discord.Embed(\n title=\"You won!\",\n description=description,\n colour=self.bot.colour,\n )\n view = self.jump_to_giveaway\n\n with contextlib.suppress(discord.HTTPException):\n await winner.send(embed=embed, view=view)\n\n async def pick_winners(\n self, count: int, guild: discord.Guild\n ) -> List[discord.Member]:\n winners = []\n\n participants = self.participants.copy()\n\n while count > 0 and participants:\n member_id = random.choice(participants)\n member = await self.bot.get_or_fetch_member(guild, member_id)\n if member is not None and member not in winners:\n try:\n await self.check_requirements(member)\n except GiveawayError:\n pass\n else:\n winners.append(member)\n count -= 1\n\n participants.remove(member_id)\n\n return winners\n\n def get_end_embed(self, guild: discord.Guild, config: GuildConfig) -> discord.Embed:\n description = (\n f\"This giveaway has ended!\\n\"\n f\"Hosted By: <@!{self.host_id}>\\n\"\n f\"Winners: {', '.join(f'<@!{winner_id}>' for winner_id in self.winners) if self.winners else 'No Winners'}\\n\"\n f\"Ended: {discord.utils.format_dt(datetime.datetime.now(datetime.timezone.utc), style='R')} ({discord.utils.format_dt(datetime.datetime.now(datetime.timezone.utc), style='f')})\\n\"\n )\n if self.donor_id:\n description += f\"Donor: <@!{self.donor_id}>\\n\"\n embed = discord.Embed(\n title=self.prize,\n description=description,\n colour=config.color,\n timestamp=self.ends,\n )\n embed.set_footer(\n text=f\"{self.winner_count} winner(s) • Ended\",\n icon_url=guild.icon or self.bot.user.display_avatar,\n )\n\n requirements = \"\"\n if self.required_roles:\n requirements += f\"Required Roles: {', '.join(f'<@&{role_id}>' for role_id in self.required_roles)}\\n\"\n if self.bypass_roles:\n requirements += f\"Bypass Roles: {', '.join(f'<@&{role_id}>' for role_id in self.bypass_roles)}\\n\"\n if self.blacklisted_roles:\n requirements += f\"Blacklisted Roles: {', '.join(f'<@&{role_id}>' for role_id in self.blacklisted_roles)}\\n\"\n if self.messages_required:\n requirements += f\"Messages Required: **{self.messages_required}** message(s) (5s cooldown)\\n\"\n if self.allowed_message_channels:\n requirements += f\"Allowed Channels: {', '.join(f'<#{cid}>' for cid in self.allowed_message_channels)}\\n\"\n if self.amari:\n requirements += f\"Amari Level: {self.amari}\\n\"\n if self.weekly_amari:\n requirements += f\"Weekly Amari: {self.weekly_amari} XP Points\\n\"\n\n if requirements:\n embed.add_field(name=\"Requirements\", value=requirements, inline=False)\n\n if self.multiplier_roles:\n multiplier_roles = \"\\n\".join(\n [\n f\"- {multiplier_entries}x ・ <@&{multiplier_role}>\"\n for multiplier_role, multiplier_entries in self.multiplier_roles.items()\n ]\n )\n embed.add_field(name=\"Bonus Entries\", value=multiplier_roles, inline=False)\n\n return embed" }, { "identifier": "Raffle", "path": "models/raffles.py", "snippet": "class Raffle:\n \"\"\"\n Represents a raffle object.\n\n Attributes\n ----------\n pool: asyncpg.Pool\n The PostgreSQL connection pool instance.\n guild: discord.Guild\n The guild (server) where the raffle is hosted.\n name: str\n The name of the raffle.\n winner: Optional[discord.Member]\n The member instance of the winner, or None if the raffle hasn't ended yet.\n deputy_roles: List[discord.Role]\n A list of roles associated with the raffle.\n deputy_members: List[discord.Member]\n A list of members associated with the raffle.\n tickets: Dict[discord.Member, int]\n A mapping of members to the number of tickets they have.\n \"\"\"\n\n 
def __init__(\n self,\n pool: asyncpg.Pool,\n *,\n guild: discord.Guild,\n name: str,\n winner: Optional[discord.Member],\n deputy_roles: List[discord.Role],\n deputy_members: List[discord.Member],\n tickets: Dict[discord.Member, int],\n ):\n self.pool = pool\n\n self.guild = guild\n self.name = name\n self.winner = winner\n self.deputy_roles = deputy_roles\n self.deputy_members = deputy_members\n self.tickets = tickets\n\n def __str__(self):\n return self.name\n\n def __repr__(self) -> str:\n return f\"<Raffle name={self.name} guild={self.guild} winner={self.winner}>\"\n\n def __hash__(self) -> int:\n return hash((self.name, self.guild))\n\n def __eq__(self, other: Raffle) -> bool:\n return self.name == other.name and self.guild == other.guild\n\n @classmethod\n async def from_record(cls, bot: Giftify, *, record: asyncpg.Record) -> Raffle:\n name = record[\"name\"]\n guild = bot.get_guild(record[\"guild\"])\n if guild is None:\n raise RaffleError(\"The guild having the raffle was not found.\")\n\n winner_id = record[\"winner\"]\n winner: Optional[discord.Member] = (\n (await bot.get_or_fetch_member(guild, winner_id) or FakeMember(winner_id))\n if winner_id\n else None\n ) # type: ignore\n\n deputy_roles = [guild.get_role(role_id) for role_id in record[\"deputy_roles\"]]\n deputy_members = [\n await bot.get_or_fetch_member(guild, member_id)\n for member_id in record[\"deputy_members\"]\n ]\n\n tickets = {\n await bot.get_or_fetch_member(guild, int(member_id)): num_tickets\n for member_id, num_tickets in record[\"tickets\"].items()\n }\n\n return cls(\n bot.pool,\n guild=guild,\n name=name,\n winner=winner,\n deputy_roles=filter_none(deputy_roles),\n deputy_members=filter_none(deputy_members),\n tickets=filter_none(tickets),\n )\n\n async def roll(self) -> discord.Member:\n \"\"\"\n End the raffle and set the winner.\n \"\"\"\n members = list(self.tickets.keys())\n weights = list(self.tickets.values())\n\n self.winner = random.choices(members, weights, k=1)[0]\n\n await self.save()\n\n return self.winner\n\n async def add_deputy(self, obj: Union[discord.Member, discord.Role]) -> None:\n \"\"\"\n Add a deputy to the raffle.\n\n Parameters\n ----------\n obj: Union[discord.Member, discord.Role]\n The instance of deputy member or role to be added.\n \"\"\"\n if isinstance(obj, discord.Member):\n if len(self.deputy_members) >= 25:\n raise RaffleError(\"You cannot add more than 25 deputy members.\")\n self.deputy_members.append(obj)\n elif isinstance(obj, discord.Role):\n if len(self.deputy_roles) >= 10:\n raise RaffleError(\"You cannot add more than 10 deputy roles.\")\n self.deputy_roles.append(obj)\n else:\n raise RaffleError(\"Invalid obj type.\")\n\n await self.save()\n\n async def remove_deputy(self, obj: Union[discord.Member, discord.Role]) -> None:\n \"\"\"\n Remove a deputy from the raffle.\n\n Parameters\n ----------\n obj: Union[discord.Member, discord.Role]\n The instance of deputy member or role to be removed.\n \"\"\"\n if isinstance(obj, discord.Member):\n if obj not in self.deputy_members:\n raise RaffleError(\"That member is not a deputy.\")\n self.deputy_members.remove(obj)\n elif isinstance(obj, discord.Role):\n if obj not in self.deputy_roles:\n raise RaffleError(\"That role is not a deputy.\")\n self.deputy_roles.remove(obj)\n else:\n raise RaffleError(\"Invalid obj type.\")\n\n await self.save()\n\n async def add_tickets(self, member: discord.Member, num_tickets: int) -> None:\n \"\"\"\n Add tickets to a member.\n\n Parameters\n ----------\n member: discord.Member\n The 
instance of the member.\n num_tickets: int\n The number of tickets to add.\n \"\"\"\n if member in self.tickets:\n self.tickets[member] += num_tickets\n else:\n self.tickets[member] = num_tickets\n\n await self.save()\n\n async def remove_tickets(self, member: discord.Member, num_tickets: int) -> None:\n \"\"\"\n Remove tickets from a member.\n\n Parameters\n ----------\n member: discord.Member\n The instance of the member.\n num_tickets: int\n The number of tickets to remove.\n \"\"\"\n if member in self.tickets:\n self.tickets[member] -= num_tickets\n if self.tickets[member] <= 0:\n del self.tickets[member]\n\n await self.save()\n else:\n raise RaffleError(\n f\"That member does not have any tickets in {self.name} raffle.\"\n )\n\n async def save(self) -> None:\n \"\"\"\n Update raffle attributes in the database.\n \"\"\"\n query = \"\"\"\n INSERT INTO raffles (guild, name, winner, deputy_roles, deputy_members, tickets)\n VALUES ($1, $2, $3, $4, $5, $6)\n ON CONFLICT (guild, name)\n DO UPDATE SET winner = EXCLUDED.winner, deputy_roles = EXCLUDED.deputy_roles,\n deputy_members = EXCLUDED.deputy_members, tickets = EXCLUDED.tickets;\n \"\"\"\n await self.pool.execute(\n query,\n self.guild.id,\n self.name,\n self.winner.id if self.winner else None,\n [role.id for role in self.deputy_roles],\n [member.id for member in self.deputy_members],\n {\n str(member.id): num_tickets\n for member, num_tickets in self.tickets.items()\n },\n )\n\n async def delete(self):\n \"\"\"\n Delete the raffle from the database.\n \"\"\"\n query = \"\"\"DELETE FROM raffles WHERE guild = $1 AND name = $2\"\"\"\n await self.pool.execute(query, self.guild.id, self.name)" }, { "identifier": "ERROR_EMOJI", "path": "utils/constants.py", "snippet": "ERROR_EMOJI = \"<:GiftifyError:1117842868057423914>\"" }, { "identifier": "SUCCESS_EMOJI", "path": "utils/constants.py", "snippet": "SUCCESS_EMOJI = \"<:GiftifySuccess:1100674526318166048>\"" }, { "identifier": "WARN_EMOJI", "path": "utils/constants.py", "snippet": "WARN_EMOJI = \"<:GiftifyWarn:1098498926564356106>\"" }, { "identifier": "db_init", "path": "utils/db.py", "snippet": "async def db_init(connection: asyncpg.Connection) -> None:\n await connection.set_type_codec(\n \"jsonb\", schema=\"pg_catalog\", encoder=_encode_jsonb, decoder=_decode_jsonb\n )" }, { "identifier": "CommandTree", "path": "utils/tree.py", "snippet": "class CommandTree(app_commands.CommandTree):\r\n client: \"Giftify\"\r\n\r\n async def on_error(\r\n self,\r\n interaction: Interaction,\r\n error: app_commands.AppCommandError,\r\n ) -> None:\r\n view = discord.ui.View()\r\n\r\n button = discord.ui.Button(label=\"Support\", url=\"https://discord.gg/GQSGChbEKz\")\r\n\r\n view.add_item(button)\r\n\r\n if not interaction.response.is_done():\r\n await interaction.response.defer(thinking=True, ephemeral=True)\r\n\r\n embed = discord.Embed(\r\n title=\"An error was raised while executing this command!\",\r\n color=discord.Colour.red(),\r\n )\r\n\r\n if isinstance(error, app_commands.CommandInvokeError):\r\n if isinstance(error, MaxChannelConfigCreationError):\r\n embed.description = (\r\n f\"{WARN_EMOJI} You cannot setup configuration for more than 25 channels, please try removing some.\"\r\n )\r\n elif isinstance(error, discord.HTTPException):\r\n embed.description = f\"{WARN_EMOJI} Unknown HTTP error occured!\"\r\n else:\r\n embed.description = (\r\n f\"{WARN_EMOJI} An unknown error occurred , my developers have been notified about this error.\"\r\n )\r\n self.client.log_handler.log.exception(\"Exception 
occurred in the CommandTree:\\n\", exc_info=error)\r\n sentry_sdk.capture_exception(error)\r\n elif isinstance(error, app_commands.TransformerError):\r\n if isinstance(error, TransformerError):\r\n embed.description = f\"{WARN_EMOJI} {error.message}\"\r\n else:\r\n embed.description = f\"{WARN_EMOJI} {str(error)}\"\r\n\r\n elif isinstance(error, app_commands.MissingPermissions):\r\n missing = [perm.replace(\"_\", \" \").replace(\"guild\", \"server\").title() for perm in error.missing_permissions]\r\n\r\n format = \"\\n> \".join(missing)\r\n\r\n embed.description = (\r\n f\"{WARN_EMOJI} You are missing follwing permission(s) to run this command: \\n\\n> {format}\"\r\n )\r\n\r\n elif isinstance(error, app_commands.BotMissingPermissions):\r\n missing = [perm.replace(\"_\", \" \").replace(\"guild\", \"server\").title() for perm in error.missing_permissions]\r\n\r\n format = \"\\n> \".join(missing)\r\n\r\n embed.description = f\"{WARN_EMOJI} I am missing follwing permission(s) to run this command: \\n\\n > {format}\"\r\n\r\n elif isinstance(error, app_commands.CommandOnCooldown):\r\n cooldown = int(error.cooldown.per)\r\n retry_after = int(error.retry_after)\r\n embed.description = f\"{WARN_EMOJI} The cooldown for this command is **{cooldown}s**. Try running the command again after **{retry_after}s**.\"\r\n\r\n elif isinstance(error, app_commands.CommandNotFound):\r\n embed.description = f'{WARN_EMOJI} The command \"{error.name}\" was not found.'\r\n elif isinstance(error, DonationError):\r\n embed.description = f\"{WARN_EMOJI} {str(error)}\"\r\n elif isinstance(error, app_commands.CheckFailure):\r\n if isinstance(error, (DonationCategoryError, DonationPermissionsError)):\r\n embed.description = f\"{WARN_EMOJI} {str(error.message)}\"\r\n else:\r\n return\r\n else:\r\n embed.description = (\r\n f\"{WARN_EMOJI} An unknown error occured, my developers have been notified about this errors.\"\r\n )\r\n await interaction.followup.send(embed=embed, ephemeral=True)\r\n sentry_sdk.capture_exception(error)\r\n return self.client.log_handler.log.exception(\"Exception occurred in the CommandTree:\\n\", exc_info=error)\r\n\r\n return await interaction.followup.send(embed=embed, ephemeral=True)\r" }, { "identifier": "ConfirmationView", "path": "utils/view.py", "snippet": "class ConfirmationView(BaseView):\r\n def __init__(\r\n self,\r\n *,\r\n timeout: float,\r\n interaction: Interaction,\r\n success_message: str,\r\n cancel_message: str,\r\n ) -> None:\r\n super().__init__(timeout=timeout)\r\n self.interaction = interaction\r\n self.success_message = success_message\r\n self.cancel_message = cancel_message\r\n self.value: Optional[bool] = None\r\n\r\n @property\r\n def success_embed(self) -> discord.Embed:\r\n return discord.Embed(\r\n description=f\"{SUCCESS_EMOJI} {self.success_message}\",\r\n colour=discord.Colour.green(),\r\n )\r\n\r\n @property\r\n def cancel_embed(self) -> discord.Embed:\r\n return discord.Embed(\r\n description=f\"{SUCCESS_EMOJI} {self.cancel_message}\",\r\n colour=discord.Colour.green(),\r\n )\r\n\r\n async def interaction_check(self, interaction: Interaction) -> bool:\r\n if interaction.user and interaction.user.id == self.interaction.user.id:\r\n return True\r\n else:\r\n await interaction.response.send_message(\r\n \"This confirmation dialog is not for you.\", ephemeral=True\r\n )\r\n return False\r\n\r\n async def on_timeout(self) -> None:\r\n with contextlib.suppress(discord.HTTPException):\r\n for item in self.children:\r\n item.disabled = True\r\n await 
self.interaction.edit_original_response(view=self)\r\n\r\n @discord.ui.button(label=\"Confirm\", style=discord.ButtonStyle.green)\r\n async def confirm(self, interaction: Interaction, button: discord.ui.Button):\r\n self.value = True\r\n await interaction.response.edit_message(embed=self.success_embed, view=None)\r\n self.stop()\r\n\r\n @discord.ui.button(label=\"Cancel\", style=discord.ButtonStyle.red)\r\n async def cancel(self, interaction: Interaction, button: discord.ui.Button):\r\n self.value = False\r\n await interaction.response.edit_message(embed=self.cancel_embed, view=None)\r\n\r\n self.stop()\r" } ]
import asyncio
import datetime
import logging
import os
import pathlib
import sys
import traceback
import aiohttp
import asyncpg
import discord
import dotenv
import jishaku
import sentry_sdk
import uvloop
from logging.handlers import RotatingFileHandler
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
from amari import AmariClient
from discord.ext import commands
from discord.utils import MISSING
from discord.utils import _ColourFormatter as ColourFormatter
from expiringdict import ExpiringDict
from sentry_sdk.integrations.logging import LoggingIntegration
from models.giveaway_settings import GuildConfig
from models.giveaways import Giveaway
from models.raffles import Raffle
from utils.constants import ERROR_EMOJI, SUCCESS_EMOJI, WARN_EMOJI
from utils.db import db_init
from utils.tree import CommandTree
from utils.view import ConfirmationView
from cogs.timer_manager import TimerManager
from models.donation_settings import GuildDonationConfig
13,742
try:
except ImportError:  # Windows
    pass
else:
    uvloop.install()

jishaku.Flags.HIDE = True
jishaku.Flags.RETAIN = True
jishaku.Flags.NO_UNDERSCORE = True
jishaku.Flags.NO_DM_TRACEBACK = True

OWNER_IDS = (747403406154399765,)
EXTENSIONS: Tuple[str, ...] = (
    "meta",
    "settings",
    "timers",
    "giveaways",
    "donations",
    "raffles",
    "logger",
    "webserver",
)


class RemoveNoise(logging.Filter):
    def __init__(self) -> None:
        super().__init__(name="discord.state")

    def filter(self, record) -> bool:
        if record.levelname == "WARNING" and "referencing an unknown" in record.msg:
            return False
        return True


class LogHandler:
    def __init__(self, stream: bool = True) -> None:
        self.log: logging.Logger = logging.getLogger()
        self.max_bytes: int = 32 * 1024 * 1024
        self.logging_path = pathlib.Path("./logs/")
        self.logging_path.mkdir(exist_ok=True)
        self.stream = stream

    async def __aenter__(self) -> "LogHandler":
        return self.__enter__()

    def __enter__(self: "LogHandler") -> "LogHandler":
        logging.getLogger("discord").setLevel(logging.INFO)
        logging.getLogger("discord.http").setLevel(logging.INFO)
        logging.getLogger("discord.state").addFilter(RemoveNoise())

        self.log.setLevel(logging.INFO)
        handler = RotatingFileHandler(
            filename=self.logging_path / "Giftify.log",
            encoding="utf-8",
            mode="w",
            maxBytes=self.max_bytes,
            backupCount=5,
        )
        dt_fmt = "%Y-%m-%d %H:%M:%S"
        fmt = logging.Formatter("[{asctime}] [{levelname:<7}] {name}: {message}", dt_fmt, style="{")
        handler.setFormatter(fmt)
        self.log.addHandler(handler)

        if self.stream:
            stream_handler = logging.StreamHandler()
            stream_handler.setFormatter(ColourFormatter())
            self.log.addHandler(stream_handler)

        return self

    async def __aexit__(self, *args: Any) -> None:
        return self.__exit__(*args)

    def __exit__(self, *args: Any) -> None:
        handlers = self.log.handlers[:]
        for handler in handlers:
            handler.close()
            self.log.removeHandler(handler)


class GiftifyHelper:
    configs: List[GuildConfig] = []
    donation_configs: List[GuildDonationConfig] = []
    cached_giveaways: List["Giveaway"] = []
    webhook_cache: Dict[discord.TextChannel, discord.Webhook] = {}
    raffles_cache: Dict[discord.Guild, List[Raffle]] = ExpiringDict(max_len=100, max_age_seconds=300)

    pool: asyncpg.Pool
    user: discord.ClientUser
    amari_client: AmariClient

    """A helper class for Giftify's operations.

    This class provides methods to send interaction messages with embeds,
    fetch webhooks for a channel, and retrieve or fetch guild configuration.
    """

    async def send(
        self,
        interaction: discord.Interaction,
        message: str,
        reason: str = "success",
        ephemeral: bool = True,
        view: discord.ui.View = MISSING,
    ) -> None:
        """Sends an interaction message with embed.

        Parameters
        -----------
        interaction: discord.Interaction
            The interaction to respond to.
        message: str
            The response message to send.
        reason: str
            The reason to send the message, can be "warn", "error" or "success".
        ephemeral: bool
            If the response should be sent ephemerally.
        """
from __future__ import annotations

if TYPE_CHECKING:

dotenv.load_dotenv()

try:
except ImportError:  # Windows
    pass
else:
    uvloop.install()

jishaku.Flags.HIDE = True
jishaku.Flags.RETAIN = True
jishaku.Flags.NO_UNDERSCORE = True
jishaku.Flags.NO_DM_TRACEBACK = True

OWNER_IDS = (747403406154399765,)
EXTENSIONS: Tuple[str, ...] = (
    "meta",
    "settings",
    "timers",
    "giveaways",
    "donations",
    "raffles",
    "logger",
    "webserver",
)


class RemoveNoise(logging.Filter):
    def __init__(self) -> None:
        super().__init__(name="discord.state")

    def filter(self, record) -> bool:
        if record.levelname == "WARNING" and "referencing an unknown" in record.msg:
            return False
        return True


class LogHandler:
    def __init__(self, stream: bool = True) -> None:
        self.log: logging.Logger = logging.getLogger()
        self.max_bytes: int = 32 * 1024 * 1024
        self.logging_path = pathlib.Path("./logs/")
        self.logging_path.mkdir(exist_ok=True)
        self.stream = stream

    async def __aenter__(self) -> "LogHandler":
        return self.__enter__()

    def __enter__(self: "LogHandler") -> "LogHandler":
        logging.getLogger("discord").setLevel(logging.INFO)
        logging.getLogger("discord.http").setLevel(logging.INFO)
        logging.getLogger("discord.state").addFilter(RemoveNoise())

        self.log.setLevel(logging.INFO)
        handler = RotatingFileHandler(
            filename=self.logging_path / "Giftify.log",
            encoding="utf-8",
            mode="w",
            maxBytes=self.max_bytes,
            backupCount=5,
        )
        dt_fmt = "%Y-%m-%d %H:%M:%S"
        fmt = logging.Formatter("[{asctime}] [{levelname:<7}] {name}: {message}", dt_fmt, style="{")
        handler.setFormatter(fmt)
        self.log.addHandler(handler)

        if self.stream:
            stream_handler = logging.StreamHandler()
            stream_handler.setFormatter(ColourFormatter())
            self.log.addHandler(stream_handler)

        return self

    async def __aexit__(self, *args: Any) -> None:
        return self.__exit__(*args)

    def __exit__(self, *args: Any) -> None:
        handlers = self.log.handlers[:]
        for handler in handlers:
            handler.close()
            self.log.removeHandler(handler)


class GiftifyHelper:
    configs: List[GuildConfig] = []
    donation_configs: List[GuildDonationConfig] = []
    cached_giveaways: List["Giveaway"] = []
    webhook_cache: Dict[discord.TextChannel, discord.Webhook] = {}
    raffles_cache: Dict[discord.Guild, List[Raffle]] = ExpiringDict(max_len=100, max_age_seconds=300)

    pool: asyncpg.Pool
    user: discord.ClientUser
    amari_client: AmariClient

    """A helper class for Giftify's operations.

    This class provides methods to send interaction messages with embeds,
    fetch webhooks for a channel, and retrieve or fetch guild configuration.
    """

    async def send(
        self,
        interaction: discord.Interaction,
        message: str,
        reason: str = "success",
        ephemeral: bool = True,
        view: discord.ui.View = MISSING,
    ) -> None:
        """Sends an interaction message with embed.

        Parameters
        -----------
        interaction: discord.Interaction
            The interaction to respond to.
        message: str
            The response message to send.
        reason: str
            The reason to send the message, can be "warn", "error" or "success".
        ephemeral: bool
            If the response should be sent ephemerally.
        """
emoji = WARN_EMOJI if reason == "warn" else ERROR_EMOJI if reason == "error" else SUCCESS_EMOJI
4
2023-11-09 15:00:15+00:00
16k
Zjy0401/CoCoFormer
train.py
[ { "identifier": "create_jsf_datasets", "path": "dataset/jsf.py", "snippet": "def create_jsf_datasets(dataset_root, max_seq, random_seq=True):\n\n train_root = os.path.join(dataset_root, \"train\")\n # val_root = os.path.join(dataset_root, \"val\")\n test_root = os.path.join(dataset_root, \"test\")\n\n train_dataset = MultiJSFDataset(train_root, max_seq, random_seq)\n # val_dataset = JSFDataset(val_root, max_seq, random_seq)\n test_dataset = MultiJSFDataset(test_root, max_seq, random_seq)\n\n return train_dataset, test_dataset" }, { "identifier": "CoCoformer", "path": "model/CoCoFormer.py", "snippet": "class CoCoformer(nn.Module):\n\n def __init__(self, word2event, event2word, n_layers=6, num_heads=8, d_model=512, dim_feedforward=1024,\n dropout=0.1, max_sequence=2048, c_max_seq=256, b_max_seq=1024, rpr=False):\n super(CoCoformer, self).__init__()\n\n self.dummy = DummyDecoder()\n self.nlayers = n_layers\n self.nhead = num_heads\n self.d_model = d_model\n self.d_ff = dim_feedforward\n self.dropout = dropout\n self.max_seq = max_sequence\n self.c_max_seq = c_max_seq\n self.b_max_seq = b_max_seq\n self.rpr = rpr\n # word2event and event2word:\n self.word2event = word2event\n self.event2word = event2word\n\n # past layer of chord\n self.cpast_layer_dmodel = d_model\n self.cpast_layer_nhead = 8\n self.cpast_dim_forward = 256\n self.cpast_layer_max_seq = 256\n self.cpast_layer_nlayers = 1\n\n # past layer of beats\n self.bpast_layer_dmodel = d_model\n self.bpast_layer_nhead = 8\n self.bpast_dim_forward = 256\n self.bpast_layer_max_seq = 1024\n self.bpast_layer_nlayers = 1\n\n # Input embedding\n self.n_embedding = nn.Embedding(VOCAB_SIZE, self.d_model)\n self.c_embedding = nn.Embedding(VOCAB_SIZE, self.cpast_layer_dmodel)\n self.b_embedding = nn.Embedding(VOCAB_SIZE, self.bpast_layer_dmodel)\n # Positional encoding\n self.n_positional_encoding = PositionalEncoding(self.d_model, self.dropout, self.max_seq)\n self.c_positional_encoding = PositionalEncoding(self.cpast_layer_dmodel, self.dropout, self.cpast_layer_max_seq)\n self.b_positional_encoding = PositionalEncoding(self.bpast_layer_dmodel, self.dropout, self.bpast_layer_max_seq)\n\n # Base transformer\n if not self.rpr:\n # To make a decoder-only transformer we need to use masked encoder layers\n # Dummy decoder to essentially just return the encoder output\n encoder_norm = LayerNorm(self.d_model)\n encoder_past_layer = TransformerEncoderPastLayer(self.cpast_layer_dmodel, self.cpast_layer_nhead,\n self.cpast_dim_forward, self.bpast_layer_dmodel,\n self.bpast_layer_nhead, self.bpast_dim_forward,\n self.d_model, self.nhead,\n self.d_ff, self.dropout)\n encoder_layer = TransformerEncoderLayer(self.d_model, self.nhead, self.d_ff, self.dropout)\n encoder = TransformerEncoder(encoder_layer, self.nlayers, encoder_past_layer, self.max_seq, self.c_max_seq,\n self.b_max_seq, encoder_norm)\n self.transformer = nn.Transformer(\n d_model=self.d_model, nhead=self.nhead, num_encoder_layers=self.nlayers,\n num_decoder_layers=0, dropout=self.dropout, # activation=self.ff_activ,\n dim_feedforward=self.d_ff, custom_encoder=encoder, custom_decoder=self.dummy\n )\n # RPR Transformer\n elif self.rpr:\n encoder_norm = LayerNorm(self.d_model)\n encoder_layer = TransformerEncoderLayerRPR(self.d_model, self.nhead, self.d_ff, self.dropout,\n er_len=self.max_seq)\n encoder_past_layer = TransformerEncoderLayerRPR_(self.cpast_layer_dmodel, self.cpast_layer_nhead,\n self.cpast_dim_forward, self.bpast_layer_dmodel,\n self.bpast_layer_nhead, self.bpast_dim_forward,\n 
self.d_model, self.nhead,\n self.d_ff, self.dropout, er_len=self.max_seq)\n encoder = TransformerEncoderRPR(encoder_layer, self.nlayers, encoder_past_layer, self.max_seq,\n self.c_max_seq, self.b_max_seq, encoder_norm)\n\n self.transformer = nn.Transformer(\n d_model=self.d_model, nhead=self.nhead, num_encoder_layers=self.nlayers,\n num_decoder_layers=0, dropout=self.dropout, # activation=self.ff_activ,\n dim_feedforward=self.d_ff, custom_decoder=self.dummy, custom_encoder=encoder\n )\n\n # Final output is a softmaxed linear layer\n # TODO: verify the size of linear\n self.Norm1 = nn.LayerNorm(1024)\n self.ReLU = nn.ReLU()\n self.Norm2 = nn.LayerNorm(181)\n self.Dropout = nn.Dropout(dropout)\n self.transLinear = nn.Linear(256, 256)\n self.Wout1 = nn.Linear(self.d_model, 1024)\n self.Wout2 = nn.Linear(1024, 1024)\n self.Wout3 = nn.Linear(1024, VOCAB_SIZE)\n self.softmax = nn.Softmax(dim=-1)\n\n def _reset_parameters(self):\n r\"\"\"Initiate parameters in the transformer model.\"\"\"\n\n for p in self.parameters():\n if p.dim() > 1:\n xavier_uniform_(p)\n\n # forward\n def forward(self, x1, x2, x3, mask=True):\n\n args = parse_train_args()\n # for pure-Transformer:\n # Transformer module:\n if mask is True:\n if args.gpu[0] != -1:\n mask = self.transformer.generate_square_subsequent_mask(x1.shape[1]).cuda(device=args.gpu[0])\n else:\n mask = self.transformer.generate_square_subsequent_mask(x1.shape[1]).cpu()\n else:\n mask = None\n # Input shape is (max_seq, batch_size, d_model)\n x_n = self.n_embedding(x1)\n x_n = x_n.permute(1, 0, 2)\n x_n = self.n_positional_encoding(x_n)\n\n x_c = self.c_embedding(x2)\n x_c = x_c.permute(1, 0, 2)\n x_c = self.c_positional_encoding(x_c)\n\n x_b = self.b_embedding(x3)\n x_b = x_b.permute(1, 0, 2)\n x_b = self.b_positional_encoding(x_b)\n\n # Since there are no true decoder layers, the tgt is unused\n # Pytorch wants src and tgt to have some equal dims however\n x_out = self.transformer(src=torch.cat((x_n, x_c, x_b), dim=0), tgt=x_n,\n src_mask=mask)\n # x_out = self.transformer(src=x_transformer, tgt=x_transformer, src_mask=mask)\n # Back to (batch_size, max_seq, d_model)\n x_out = x_out.permute(1, 0, 2)\n\n # concat\n # x_concat = torch.cat([x_out, x_out2], dim=1)\n y = self.Dropout(self.Norm1(self.ReLU(self.Wout1(x_out))))\n y = self.Dropout(self.Norm1(self.ReLU(self.Wout2(y))))\n y = self.Wout3(y)\n # y = self.Wout2(y)\n # y = self.softmax(y)\n\n del mask\n\n # They are trained to predict the next note in sequence (we don't need the last one)\n return y\n\n # unconditional generate\n def generate(self, primer=None, target_seq_length=1024, beam=0, beam_chance=1.0):\n\n assert (not self.training), \"Cannot generate while in training mode\"\n\n print(\"Generating sequence of max length:\", target_seq_length)\n\n gen_seq = torch.full((1, target_seq_length), TOKEN_PAD, dtype=TORCH_LABEL_TYPE, device=get_device())\n\n num_primer = len(primer)\n gen_seq[..., :num_primer] = primer.type(TORCH_LABEL_TYPE).to(get_device())\n\n # print(\"primer:\",primer)\n # print(gen_seq)\n cur_i = num_primer\n while cur_i < target_seq_length:\n # gen_seq_batch = gen_seq.clone()\n y = self.softmax(self.forward(gen_seq[..., :cur_i]))[..., :len(self.word2event)]\n token_probs = y[:, cur_i - 1, :]\n\n if beam == 0:\n beam_ran = 2.0\n else:\n beam_ran = random.uniform(0, 1)\n\n if beam_ran <= beam_chance:\n token_probs = token_probs.flatten()\n top_res, top_i = torch.topk(token_probs, beam)\n\n beam_rows = top_i // VOCAB_SIZE\n beam_cols = top_i % VOCAB_SIZE\n\n gen_seq = 
gen_seq[beam_rows, :]\n gen_seq[..., cur_i] = beam_cols\n\n else:\n distrib = torch.distributions.categorical.Categorical(probs=token_probs)\n next_token = distrib.sample()\n # print(\"next token:\",next_token)\n gen_seq[:, cur_i] = next_token\n\n # Let the transformer decide to end if it wants to\n # if next_token == TOKEN_END:\n # print(\"Model called end of sequence at:\", cur_i, \"/\", target_seq_length)\n # break\n\n cur_i += 1\n if cur_i % 50 == 0:\n print(cur_i, \"/\", target_seq_length)\n\n return gen_seq[:, :cur_i]\n\n # conditional generate\n def conditional_generate(self, beats, chord, seq, c, bs, ba, bt, bb, target_seq_length=1024, beam=0, beam_chance=1.0):\n\n assert (not self.training), \"Cannot generate while in training mode\"\n print(\"Generating sequence of max length:\", target_seq_length)\n chord = torch.tensor(chord, device=get_device()).unsqueeze(0)\n beats = torch.tensor(beats, device=get_device()).unsqueeze(0)\n\n gen_seq = torch.full((1, target_seq_length), TOKEN_PAD, dtype=TORCH_LABEL_TYPE, device=get_device())\n primer = torch.tensor([c[0], bs[0], seq[0], ba[0]])\n primer_num = 1 # decide key to add\n num_primer = len(primer)\n gen_seq[..., :num_primer] = primer.type(TORCH_LABEL_TYPE).to(get_device())\n\n # print(\"primer:\",primer)\n # print(gen_seq)\n cur_i = num_primer\n # first input: C B N B\n cur_i_n = 1\n cur_i_b = 2\n cur_i_c = 1\n check_error = 0\n pbar = tqdm(total=len(seq)*9)\n while cur_i < target_seq_length:\n a = gen_seq[..., :cur_i].cpu().numpy()\n # gen_seq_batch = gen_seq.clone()\n # print(\"input:\", gen_seq[..., :cur_i], chord[..., :cur_i_c], beats[..., :cur_i_b])\n y = self.softmax(self.forward(gen_seq[..., :cur_i], chord[..., :cur_i_c],\n beats[..., :cur_i_b]))[..., :len(self.word2event)]\n token_probs = y[:, cur_i - 1, :]\n # check for y\n distrib = torch.distributions.categorical.Categorical(probs=token_probs)\n next_token = distrib.sample()\n if check_error > 256:\n print(\"error! 
regenerate!\")\n return False\n # next token is the next token\n if cur_i % 9 == 1: # token is chord, next token must be beats\n if not 178 < next_token < 191: # if it is not beat\n check_error += 1\n continue\n if cur_i % 9 in [2, 4, 6, 8]: # this token must be beat, next token must be note\n if not next_token < 129: # if it is not note\n check_error += 1\n continue\n else: # this token must be note, next token must be chord or beat\n if not 128 < next_token < 191: # if it is chord or beat\n check_error += 1\n continue\n\n if beam == 0:\n beam_ran = 2.0\n else:\n beam_ran = random.uniform(0, 1)\n\n if beam_ran <= beam_chance:\n token_probs = token_probs.flatten()\n top_res, top_i = torch.topk(token_probs, beam)\n\n beam_rows = top_i // VOCAB_SIZE\n beam_cols = top_i % VOCAB_SIZE\n\n gen_seq = gen_seq[beam_rows, :]\n gen_seq[..., cur_i] = beam_cols\n\n else:\n # print(\"next token:\",next_token)\n gen_seq[:, cur_i] = next_token\n cur_i += 1\n pbar.update(1)\n cur_i_n += 1\n if cur_i % 9 == 0 and primer_num < len(seq):\n # add C B_S N_S B_A\n gen_seq[:, cur_i] = chord.squeeze()[primer_num]\n gen_seq[:, cur_i+1] = torch.tensor(bs[primer_num], device=get_device())\n gen_seq[:, cur_i+2] = torch.tensor(seq[primer_num], device=get_device())\n gen_seq[:, cur_i+3] = torch.tensor(ba[primer_num], device=get_device())\n primer_num += 1\n cur_i += 4\n pbar.update(4)\n cur_i_n += 1\n cur_i_b += 2\n cur_i_c += 1\n # a = gen_seq[..., :cur_i].cpu().numpy()\n if cur_i % 9 != 0 and cur_i % 9 != 4 and primer_num < len(seq) + 1:\n # add B\n gen_seq[:, cur_i] = beats.squeeze()[cur_i_b]\n cur_i_b += 1\n cur_i_n += 1\n cur_i += 1\n pbar.update(1)\n # a = gen_seq[..., :cur_i].cpu().numpy()\n if primer_num == len(seq) and cur_i == len(seq) * 9:\n print(\"Model called end of sequence at:\", cur_i, \"/\", target_seq_length)\n break\n # print(cur_i, \"/\", target_seq_length)\n\n print(\"all errors:%d\" % check_error)\n return gen_seq[:, :cur_i]" }, { "identifier": "Discriminator", "path": "model/CoCoFormer.py", "snippet": "class Discriminator(nn.Module):\n \"\"\"\n to judge the true sample or fake\n return fake or true\n \"\"\"\n def __init__(self, input_emb=1, d_model=256, nhead=4, d_ff=512, dropout=0.5, out_emb=1024):\n super(Discriminator, self).__init__()\n self.linear1 = nn.Linear(input_emb, d_model)\n self.transformer = TransformerEncoderLayer(d_model, nhead, d_ff, dropout)\n self.linear2 = nn.Linear(d_model, out_emb)\n self.relu = nn.LeakyReLU(negative_slope=0.01, inplace=False)\n self.maxpool = nn.AdaptiveMaxPool1d(1)\n self.Norm1 = nn.LayerNorm(d_model)\n self.Norm2 = nn.LayerNorm(out_emb)\n self.dropout = nn.Dropout(dropout)\n self.sigmoid = nn.Sigmoid()\n self.loss = nn.BCELoss()\n\n def forward(self, x, labels):\n x = x.float().unsqueeze(2)\n x = self.dropout(self.Norm1(self.linear1(x)))\n x = self.transformer(x)\n logits = self.dropout(self.Norm2(self.linear2(x)))\n logits = self.sigmoid(self.relu(self.maxpool(logits)))\n logits = logits.reshape(logits.shape[0] * logits.shape[1], -1)\n labels = labels.reshape(logits.shape[0] * logits.shape[1], -1)\n loss = self.loss(logits, labels)\n\n # import numpy as np\n # logits = logits.cpu().detach().numpy()\n # labels = labels.cpu().detach().numpy()\n # loss = []\n # for i in logits:\n # loss.append(np.log(1-1/(1+np.exp(i[0]))))\n output = (loss, logits)\n\n return output\n\n def _reset_parameters(self):\n\n for p in self.parameters():\n if p.dim() > 1:\n xavier_uniform_(p)" }, { "identifier": "PureTransformer", "path": "model/CoCoFormer.py", "snippet": "class 
PureTransformer(nn.Module):\n\n def __init__(self, word2event, event2word, n_layers=6, num_heads=8, d_model=512, dim_feedforward=1024,\n dropout=0.1, max_sequence=2048, c_max_seq=256, b_max_seq=1024, rpr=False):\n super(PureTransformer, self).__init__()\n self.dummy = DummyDecoder()\n self.nlayers = n_layers\n self.nhead = num_heads\n self.d_model = d_model\n self.d_ff = dim_feedforward\n self.dropout = dropout\n self.max_seq = max_sequence\n self.rpr = rpr\n # word2event and event2word:\n self.word2event = word2event\n self.event2word = event2word\n # Input embedding\n self.embedding = nn.Embedding(VOCAB_SIZE, self.d_model)\n\n # Positional encoding\n self.positional_encoding = PositionalEncoding(self.d_model, self.dropout, self.max_seq)\n\n self.transformer = nn.Transformer(\n d_model=self.d_model, nhead=self.nhead, num_encoder_layers=self.nlayers,\n num_decoder_layers=0, dropout=self.dropout, # activation=self.ff_activ,\n dim_feedforward=self.d_ff, custom_decoder=self.dummy\n )\n\n # Final output is a softmaxed linear layer\n self.Wout = nn.Linear(self.d_model, VOCAB_SIZE)\n self.softmax = nn.Softmax(dim=-1)\n\n # forward\n def forward(self, x, mask=True):\n\n if mask is True:\n mask = self.transformer.generate_square_subsequent_mask(x[0].shape[1]).to(get_device())\n else:\n mask = None\n\n x = self.embedding(x)\n\n # Input shape is (max_seq, batch_size, d_model)\n x = x.permute(1, 0, 2)\n\n x = self.positional_encoding(x)\n\n # Since there are no true decoder layers, the tgt is unused\n # Pytorch wants src and tgt to have some equal dims however\n x_out = self.transformer(src=x, tgt=x, src_mask=mask)\n\n # Back to (batch_size, max_seq, d_model)\n x_out = x_out.permute(1, 0, 2)\n\n y = self.Wout(x_out)\n # y = self.softmax(y)\n\n del mask\n\n # They are trained to predict the next note in sequence (we don't need the last one)\n return y" }, { "identifier": "SmoothCrossEntropyLoss", "path": "model/loss.py", "snippet": "class SmoothCrossEntropyLoss(_Loss):\n \"\"\"\n https://arxiv.org/abs/1512.00567\n \"\"\"\n __constants__ = ['label_smoothing', 'vocab_size', 'ignore_index', 'reduction']\n\n def __init__(self, label_smoothing, vocab_size, ignore_index=-100, reduction='mean', is_logits=True):\n assert 0.0 <= label_smoothing <= 1.0\n super().__init__(reduction=reduction)\n\n self.label_smoothing = label_smoothing\n self.vocab_size = vocab_size\n self.ignore_index = ignore_index\n self.input_is_logits = is_logits\n\n def forward(self, input, target):\n \"\"\"\n Args:\n input: [B * T, V]\n target: [B * T]\n Returns:\n cross entropy: [1]\n \"\"\"\n mask = (target == self.ignore_index).unsqueeze(-1)\n q = F.one_hot(target.long(), self.vocab_size).type(torch.float32)\n u = 1.0 / self.vocab_size\n q_prime = (1.0 - self.label_smoothing) * q + self.label_smoothing * u\n q_prime = q_prime.masked_fill(mask, 0)\n\n ce = self.cross_entropy_with_logits(q_prime, input)\n if self.reduction == 'mean':\n lengths = torch.sum(target != self.ignore_index)\n return ce.sum() / lengths\n elif self.reduction == 'sum':\n return ce.sum()\n else:\n raise NotImplementedError\n\n def cross_entropy_with_logits(self, p, q):\n return -torch.sum(p * (q - q.logsumexp(dim=-1, keepdim=True)), dim=-1)" }, { "identifier": "get_device", "path": "utilities/device.py", "snippet": "def get_device():\n\n if((not USE_CUDA) or (TORCH_CUDA_DEVICE is None)):\n return TORCH_CPU_DEVICE\n else:\n return TORCH_CUDA_DEVICE" }, { "identifier": "use_cuda", "path": "utilities/device.py", "snippet": "def use_cuda(cuda_bool):\n\n global 
USE_CUDA\n USE_CUDA = cuda_bool" }, { "identifier": "LrStepTracker", "path": "utilities/lr_scheduling.py", "snippet": "class LrStepTracker:\n\n def __init__(self, model_dim=512, warmup_steps=4000, init_steps=0):\n # Store Values\n self.warmup_steps = warmup_steps\n self.model_dim = model_dim\n self.init_steps = init_steps\n\n # Begin Calculations\n self.invsqrt_dim = (1 / math.sqrt(model_dim))\n self.invsqrt_warmup = (1 / (warmup_steps * math.sqrt(warmup_steps)))\n\n # step\n def step(self, step):\n\n step += self.init_steps\n if(step <= self.warmup_steps):\n return self.invsqrt_dim * self.invsqrt_warmup * step\n else:\n invsqrt_step = (1 / math.sqrt(step))\n return self.invsqrt_dim * invsqrt_step" }, { "identifier": "get_lr", "path": "utilities/lr_scheduling.py", "snippet": "def get_lr(optimizer):\n\n for param_group in optimizer.param_groups:\n return param_group['lr']" }, { "identifier": "parse_train_args", "path": "utilities/argument_funcs.py", "snippet": "def parse_train_args():\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"-input_dir\", type=str, default=\"./dataset/dataset/JSF_SATB\", help=\"Folder of preprocessed and pickled midi files\")\n parser.add_argument(\"-output_dir\", type=str, default=\"./baseline_3loss\", help=\"Folder to save model weights. Saves one every epoch\")\n parser.add_argument(\"-weight_modulus\", type=int, default=1, help=\"How often to save epoch weights (ex: value of 10 means save every 10 epochs)\")\n parser.add_argument(\"-print_modulus\", type=int, default=1, help=\"How often to print train results for a batch (batch loss, learn rate, etc.)\")\n parser.add_argument(\"-word2event\", type=str, default='./dataset/word2event.pkl', help='word table location: *.pkl')\n parser.add_argument(\"-n_workers\", type=int, default=2, help=\"Number of threads for the dataloader\")\n parser.add_argument(\"--force_cpu\", action=\"store_true\", help=\"Forces model to run on a cpu even when gpu is available\")\n parser.add_argument(\"--gpu\", default=[2], nargs='+', type=int, help=\"For Multi-GPUs training\")\n parser.add_argument(\"--no_tensorboard\", action=\"store_true\", help=\"Turns off tensorboard result reporting\")\n parser.add_argument('--scheduled_sampling', default=False, help='False means use teacher forcing, True means use scheduled_sampling')\n parser.add_argument(\"--scheduled_sampling_change_ratio\", default=0.5, type=int, help='ratio about mix golden target with output')\n parser.add_argument(\"-continue_weights\", type=str, default=None, help=\"Model weights to continue training based on\")\n parser.add_argument(\"-continue_epoch\", type=int, default=None, help=\"Epoch the continue_weights model was at\")\n\n parser.add_argument(\"-lr\", type=float, default=None, help=\"Constant learn rate. 
Leave as None for a custom scheduler.\")\n parser.add_argument(\"-ce_smoothing\", type=float, default=None, help=\"Smoothing parameter for smoothed cross entropy loss (defaults to no smoothing)\")\n parser.add_argument(\"-batch_size\", type=int, default=2, help=\"Batch size per gpu to use\")\n parser.add_argument(\"-epochs\", type=int, default=300, help=\"Number of epochs to use\")\n\n parser.add_argument(\"-adv_train\", default=True, help='add discriminator loss')\n parser.add_argument(\"-only_Transformer\", default=False, help='use pure Transformer, default set to false, True only for test')\n parser.add_argument(\"-loss\", default=[0.4, 0.2, 0.8], nargs='+', type=float, help='weights of loss, the last element effect when adv train is True')\n\n parser.add_argument(\"--rpr\", action=\"store_true\", help=\"Use a modified Transformer for Relative Position Representations\")\n parser.add_argument(\"-max_sequence\", type=int, default=2048, help=\"Maximum midi sequence to consider\")\n parser.add_argument(\"-n_layers\", type=int, default=6, help=\"Number of decoder layers to use\")\n parser.add_argument(\"-num_heads\", type=int, default=8, help=\"Number of heads to use for multi-head attention\")\n parser.add_argument(\"-d_model\", type=int, default=512, help=\"Dimension of the model (output dim of embedding layers, etc.)\")\n\n parser.add_argument(\"-dim_feedforward\", type=int, default=1024, help=\"Dimension of the feedforward layer\")\n\n parser.add_argument(\"-dropout\", type=float, default=0.1, help=\"Dropout rate\")\n\n parser.add_argument(\"--metrics\", default=False, help=\"evaluate TER(token error rate)\")\n\n return parser.parse_args()" }, { "identifier": "print_train_args", "path": "utilities/argument_funcs.py", "snippet": "def print_train_args(args):\n\n print(SEPERATOR)\n print(\"input_dir:\", args.input_dir)\n print(\"output_dir:\", args.output_dir)\n print(\"weight_modulus:\", args.weight_modulus)\n print(\"print_modulus:\", args.print_modulus)\n print(\"\")\n print(\"n_workers:\", args.n_workers)\n print(\"force_cpu:\", args.force_cpu)\n print(\"tensorboard:\", not args.no_tensorboard)\n print(\"\")\n print(\"continue_weights:\", args.continue_weights)\n print(\"continue_epoch:\", args.continue_epoch)\n print(\"\")\n print(\"lr:\", args.lr)\n print(\"ce_smoothing:\", args.ce_smoothing)\n print(\"batch_size:\", args.batch_size)\n print(\"epochs:\", args.epochs)\n print(\"\")\n print(\"rpr:\", args.rpr)\n print(\"max_sequence:\", args.max_sequence)\n print(\"n_layers:\", args.n_layers)\n print(\"num_heads:\", args.num_heads)\n print(\"d_model:\", args.d_model)\n print(\"\")\n print(\"dim_feedforward:\", args.dim_feedforward)\n print(\"dropout:\", args.dropout)\n print(SEPERATOR)\n print(\"\")" }, { "identifier": "write_model_params", "path": "utilities/argument_funcs.py", "snippet": "def write_model_params(args, output_file):\n\n o_stream = open(output_file, \"w\")\n\n o_stream.write(\"rpr: \" + str(args.rpr) + \"\\n\")\n o_stream.write(\"lr: \" + str(args.lr) + \"\\n\")\n o_stream.write(\"ce_smoothing: \" + str(args.ce_smoothing) + \"\\n\")\n o_stream.write(\"batch_size: \" + str(args.batch_size) + \"\\n\")\n o_stream.write(\"max_sequence: \" + str(args.max_sequence) + \"\\n\")\n o_stream.write(\"n_layers: \" + str(args.n_layers) + \"\\n\")\n o_stream.write(\"num_heads: \" + str(args.num_heads) + \"\\n\")\n o_stream.write(\"d_model: \" + str(args.d_model) + \"\\n\")\n o_stream.write(\"dim_feedforward: \" + str(args.dim_feedforward) + \"\\n\")\n o_stream.write(\"dropout: \" + 
str(args.dropout) + \"\\n\")\n\n o_stream.close()" }, { "identifier": "train_epoch", "path": "utilities/run_model.py", "snippet": "def train_epoch(cur_epoch, model, dataloader, loss, opt, lr_scheduler=None, print_modulus=1):\n\n args = parse_train_args()\n\n model.train()\n for batch_num, batch in enumerate(dataloader):\n time_before = time.time()\n\n opt.zero_grad()\n x = batch[0]\n tgt = batch[1]\n for i in range(len(batch[0])):\n if args.gpu[0] != -1:\n if isinstance(x[i], list):\n for j in range(len(x[i])):\n x[i][j] = x[i][j].cuda(device=args.gpu[0])\n if isinstance(x[i], torch.Tensor):\n x[i] = x[i].cuda(device=args.gpu[0])\n\n if isinstance(tgt[i], list):\n for j in range(len(tgt[i])):\n tgt[i][j] = tgt[i][j].cuda(device=args.gpu[0])\n if isinstance(tgt[i], torch.Tensor):\n tgt[i] = tgt[i].cuda(device=args.gpu[0])\n else:\n if isinstance(x[i], list):\n for j in range(len(x[i])):\n x[i][j] = x[i][j].cpu()\n tgt[i][j] = tgt[i][j].cpu()\n tgt = tgt[0][0]\n tgt = tgt.flatten()\n # with torch.no_grad():\n # y1 = model(x[1])\n # y1 = y1.reshape(y1.shape[0] * y1.shape[1], -1)\n # loss1 = loss.forward(y1, tgt)\n y2 = model(x[0])\n # y3 = model(x[2])\n # train for only CT\n # y = model(x)\n\n y2 = y2.reshape(y2.shape[0] * y2.shape[1], -1)\n loss2 = loss.forward(y2, tgt)\n # tgt = tgt.flatten()\n # add scheduled sampling\n # out = loss.forward(y, tgt)\n\n loss2.backward()\n # out = args.loss[0] * loss1 + args.loss[1] * loss2\n\n opt.step()\n if lr_scheduler is not None:\n lr_scheduler.step()\n\n time_after = time.time()\n time_took = time_after - time_before\n\n if (batch_num + 1) % print_modulus == 0:\n print(\"Epoch\", cur_epoch, \" Batch\", batch_num + 1, \"/\", len(dataloader), \"LR:\", get_lr(opt),\n \"Train total loss:\", float(loss2),\n \"Time (s):\", time_took)\n\n return" }, { "identifier": "train_with_adv", "path": "utilities/run_model.py", "snippet": "def train_with_adv(cur_epoch, model, model_disc, dataloader, loss, opt, opt_disc,\n lr_scheduler=None, lr_disc_scheduler=None, print_modulus=1):\n\n args = parse_train_args()\n out = -1\n start_epoch = 5\n model.train()\n model_disc.train()\n for batch_num, batch in enumerate(dataloader):\n time_before = time.time()\n\n opt.zero_grad()\n opt_disc.zero_grad()\n x = batch[0]\n tgt = batch[1]\n for i in range(len(batch[0])):\n if args.gpu[0] != -1:\n if isinstance(x[i], list):\n for j in range(len(x[i])):\n x[i][j] = x[i][j].cuda(device=args.gpu[0])\n if isinstance(x[i], torch.Tensor):\n x[i] = x[i].cuda(device=args.gpu[0])\n\n if isinstance(tgt[i], list):\n for j in range(len(tgt[i])):\n tgt[i][j] = tgt[i][j].cuda(device=args.gpu[0])\n if isinstance(tgt[i], torch.Tensor):\n tgt[i] = tgt[i].cuda(device=args.gpu[0])\n else:\n if isinstance(x[i], list):\n for j in range(len(x[i])):\n x[i][j] = x[i][j].cpu()\n tgt[i][j] = tgt[i][j].cpu()\n tgt = tgt[0][0]\n tgt = tgt.flatten()\n with torch.no_grad():\n y1 = model.module(x[1][0], x[1][1], x[1][2])\n y1 = y1.reshape(y1.shape[0] * y1.shape[1], -1)\n loss1 = loss.forward(y1, tgt)\n y2 = model.module(x[0][0], x[0][1], x[0][2])\n # discriminator model loss:\n if args.gpu[0] != -1:\n real_disc_label = torch.ones(len(batch[0]), batch[1][0][0].shape[1], 1).to(args.gpu[0])\n fake_disc_label = torch.zeros(len(batch[0]), y2.shape[1], 1).to(args.gpu[0])\n else:\n real_disc_label = torch.ones(len(batch[0]), batch[1][0][0].shape[1], 1)\n fake_disc_label = torch.zeros(len(batch[0]), y2.shape[1], 1)\n\n softmax = nn.Softmax(dim=-1)\n d_fake_loss, d_fake_logits = model_disc(torch.argmax(softmax(y2), dim=-1), 
fake_disc_label)\n d_real_loss, d_real_logits = model_disc(batch[1][0][0], real_disc_label)\n loss3 = d_fake_loss + d_real_loss\n # y3 = model(x[2])\n # train for only CT\n # y = model(x)\n\n y2 = y2.reshape(y2.shape[0] * y2.shape[1], -1)\n loss2 = loss.forward(y2, tgt)\n # tgt = tgt.flatten()\n # add scheduled sampling\n # out = loss.forward(y, tgt)\n\n # out = loss3\n out = args.loss[0] * loss1 + args.loss[1] * loss2 + args.loss[2] * loss3\n\n out.backward()\n opt.step()\n opt_disc.step()\n if lr_scheduler is not None:\n lr_scheduler.step()\n if lr_disc_scheduler is not None:\n lr_disc_scheduler.step()\n\n time_after = time.time()\n time_took = time_after - time_before\n\n if (batch_num + 1) % print_modulus == 0:\n print(\"Epoch\", cur_epoch, \" Batch\", batch_num + 1, \"/\", len(dataloader), \"LR:\", get_lr(opt_disc),\n \"Train total loss:\", float(out), \"Train loss1:\", float(loss1), \"Train loss2:\", float(loss2),\n \"Train loss3:\", float(loss3), \"Time (s):\", time_took)\n\n return" }, { "identifier": "eval_model", "path": "utilities/run_model.py", "snippet": "def eval_model(model, dataloader, loss):\n\n model.eval()\n args = parse_train_args()\n avg_acc = -1\n avg_loss = -1\n with torch.set_grad_enabled(False):\n n_test = len(dataloader)\n sum_loss = 0.0\n sum_acc = 0.0\n for batch in tqdm.tqdm(dataloader):\n x = batch[0]\n tgt = batch[1]\n for i in range(len(batch[0])):\n if args.gpu[0] != -1:\n if isinstance(x[i], list):\n for j in range(len(x[i])):\n x[i][j] = x[i][j].cuda(device=args.gpu[0])\n if isinstance(x[i], torch.Tensor):\n x[i] = x[i].cuda(device=args.gpu[0])\n\n if isinstance(tgt[i], list):\n for j in range(len(tgt[i])):\n tgt[i][j] = tgt[i][j].cuda(device=args.gpu[0])\n if isinstance(tgt[i], torch.Tensor):\n tgt[i] = tgt[i].cuda(device=args.gpu[0])\n else:\n x[i] = x[i].cpu()\n tgt[i] = tgt[i].cpu()\n tgt = tgt[0][0]\n tgt = tgt.flatten()\n\n # with torch.no_grad():\n # y1 = model(x[0])\n # y1 = y1.reshape(y1.shape[0] * y1.shape[1], -1)\n # loss1 = loss.forward(y1, tgt)\n y2 = model.module(x[0][0], x[0][1], x[0][2])\n y2 = y2.reshape(y2.shape[0] * y2.shape[1], -1)\n loss2 = loss.forward(y2, tgt)\n out = loss2\n\n sum_acc += float(compute_jsf_accuracy(y2, tgt))\n\n # y = y.reshape(y.shape[0] * y.shape[1], -1)\n # tgt = tgt.flatten()\n\n # out = loss.forward(y, tgt)\n\n sum_loss += float(out)\n\n avg_loss = sum_loss / n_test\n avg_acc = sum_acc / n_test\n\n return avg_loss, avg_acc" }, { "identifier": "get_metrics", "path": "utilities/run_model.py", "snippet": "def get_metrics(model, dataloader):\n \"\"\"\n Calculate TER: token error rate\n \"\"\"\n args = parse_eval_args()\n model.eval()\n # TER\n with torch.set_grad_enabled(False):\n n_test = len(dataloader)\n c_acc, Ns_acc, Bs_acc, Na_acc, Ba_acc, Nt_acc, Bt_acc, Nb_acc, Bb_acc = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0\n ter = []\n for batch in tqdm.tqdm(dataloader):\n x = batch[0]\n tgt = batch[1]\n for i in range(len(batch[0])):\n if args.gpu[0] != -1:\n if isinstance(x[i], list):\n for j in range(len(x[i])):\n x[i][j] = x[i][j].cuda(device=args.gpu[0])\n if isinstance(x[i], torch.Tensor):\n x[i] = x[i].cuda(device=args.gpu[0])\n\n if isinstance(tgt[i], list):\n for j in range(len(tgt[i])):\n tgt[i][j] = tgt[i][j].cuda(device=args.gpu[0])\n if isinstance(tgt[i], torch.Tensor):\n tgt[i] = tgt[i].cuda(device=args.gpu[0])\n else:\n if isinstance(x[i], list):\n for j in range(len(x[i])):\n x[i][j] = x[i][j].cpu()\n tgt[i][j] = tgt[i][j].cpu()\n\n y = model.module(x[0][0], x[0][1], x[0][2])\n # TER\n 
ter.append(compute_jsf_ter(y, tgt))\n\n for i in ter:\n c_acc += i[0]\n Bs_acc += i[1]\n Ns_acc += i[2]\n Ba_acc += i[3]\n Na_acc += i[4]\n Bt_acc += i[5]\n Nt_acc += i[6]\n Bb_acc += i[7]\n Nb_acc += i[8]\n TER = [c_acc / n_test, Bs_acc / n_test, Ns_acc / n_test, Ba_acc / n_test, Na_acc / n_test,\n Bt_acc / n_test, Nt_acc / n_test, Bb_acc / n_test, Nb_acc / n_test]\n # clear nan , or np.mean will only be nan if one is nan\n return TER" }, { "identifier": "train_with_pure_transformer", "path": "utilities/run_model.py", "snippet": "def train_with_pure_transformer(cur_epoch, model, dataloader, loss, opt, lr_scheduler=None, print_modulus=1):\n\n args = parse_train_args()\n\n model.train()\n for batch_num, batch in enumerate(dataloader):\n time_before = time.time()\n\n opt.zero_grad()\n\n x = batch[0][0][0].to(args.gpu[0])\n tgt = batch[1][0][0].to(args.gpu[0])\n\n y = model(x)\n\n y = y.reshape(y.shape[0] * y.shape[1], -1)\n tgt = tgt.flatten()\n\n out = loss.forward(y, tgt)\n\n out.backward()\n opt.step()\n\n if lr_scheduler is not None:\n lr_scheduler.step()\n\n time_after = time.time()\n time_took = time_after - time_before\n\n if (batch_num + 1) % print_modulus == 0:\n print(\"Epoch\", cur_epoch, \" Batch\", batch_num + 1, \"/\", len(dataloader), \"LR:\", get_lr(opt),\n \"Train loss:\", float(out), \"Time (s):\", time_took)\n\n return" }, { "identifier": "params", "path": "utilities/run_model.py", "snippet": "def params(dataloader, model, model_disc):\n\n args = parse_train_args()\n model.eval()\n for batch_num, batch in enumerate(dataloader):\n flops, params = profile(model.module, (batch[0][0][0].cuda(args.gpu[0]),\n batch[0][0][1].cuda(args.gpu[0]),\n batch[0][0][2].cuda(args.gpu[0]))\n )\n print('flops:', flops, 'params:', params)\n break" } ]
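The LrStepTracker snippet above implements the inverse-square-root warmup schedule from the original Transformer paper. The sketch below (illustrative only; the toy model, base lr and helper name are assumptions, not repository code) shows how such a step function typically drives a LambdaLR scheduler:

import torch
from torch.optim import Adam
from torch.optim.lr_scheduler import LambdaLR

# Same formula as LrStepTracker.step: d_model^-0.5 * min(step^-0.5, step * warmup^-1.5)
def warmup_invsqrt_lr(step, d_model=512, warmup=4000):
    step = max(step, 1)  # avoid division by zero on the very first query
    return (d_model ** -0.5) * min(step ** -0.5, step * warmup ** -1.5)

toy_model = torch.nn.Linear(8, 8)            # stand-in model, just something with parameters
opt = Adam(toy_model.parameters(), lr=1.0)   # base lr 1.0 so the lambda alone sets the rate
sched = LambdaLR(opt, warmup_invsqrt_lr)

for _ in range(10):
    opt.step()                               # normally preceded by a forward/backward pass
    sched.step()
print(opt.param_groups[0]["lr"])             # still tiny: lr ramps up linearly until step == warmup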
import os
import csv
import shutil
import torch
import torch.nn as nn
import pickle
from thop import profile
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from torch.optim import Adam

from dataset.jsf import create_jsf_datasets
from model.CoCoFormer import CoCoformer, Discriminator, PureTransformer
from model.loss import SmoothCrossEntropyLoss
from utilities.constants import *
from utilities.device import get_device, use_cuda
from utilities.lr_scheduling import LrStepTracker, get_lr
from utilities.argument_funcs import parse_train_args, print_train_args, write_model_params
from utilities.run_model import train_epoch, train_with_adv, eval_model, get_metrics, train_with_pure_transformer, params
from tensorboardX import SummaryWriter
11,813
    ##### read word2event event2word
    f = open(args.word2event, 'rb')
    word2event = pickle.load(f)
    # reverse the vector event2word
    event2word = {}
    for key, val in word2event.items():
        event2word[val] = key

    if args.only_Transformer:
        model = PureTransformer(n_layers=args.n_layers, num_heads=args.num_heads, d_model=args.d_model,
                                dim_feedforward=args.dim_feedforward, dropout=args.dropout,
                                max_sequence=args.max_sequence, rpr=args.rpr,
                                word2event=word2event, event2word=event2word)
    else:
        model = CoCoformer(n_layers=args.n_layers, num_heads=args.num_heads, d_model=args.d_model,
                           dim_feedforward=args.dim_feedforward, dropout=args.dropout,
                           max_sequence=args.max_sequence, rpr=args.rpr,
                           word2event=word2event, event2word=event2word)
    model_disc = Discriminator()
    if args.gpu[0] != -1:
        model = torch.nn.DataParallel(model, device_ids=args.gpu)
        model = model.cuda(device=args.gpu[0])
        model_disc = torch.nn.DataParallel(model_disc, device_ids=args.gpu)
        model_disc = model_disc.cuda(device=args.gpu[0])
    params(train_loader, model, model_disc)

    ##### Continuing from previous training session #####
    start_epoch = BASELINE_EPOCH
    if args.continue_weights is not None:
        if args.continue_epoch is None:
            print("ERROR: Need epoch number to continue from (-continue_epoch) when using continue_weights")
            return
        else:
            model.load_state_dict(torch.load(args.continue_weights))
            start_epoch = args.continue_epoch
    elif args.continue_epoch is not None:
        print("ERROR: Need continue weights (-continue_weights) when using continue_epoch")
        return

    ##### Lr Scheduler vs static lr #####
    if args.lr is None:
        if args.continue_epoch is None:
            init_step = 0
        else:
            init_step = args.continue_epoch * len(train_loader)
        lr = LR_DEFAULT_START * len(args.gpu)
        lr_stepper = LrStepTracker(args.d_model, SCHEDULER_WARMUP_STEPS, init_step)
    else:
        lr = args.lr

    ##### Not smoothing evaluation loss #####
    eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD)

    ##### SmoothCrossEntropyLoss or CrossEntropyLoss for training #####
    if args.ce_smoothing is None:
        train_loss_func = eval_loss_func
    else:
        train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE, ignore_index=TOKEN_PAD)

    ##### Optimizer #####
    opt = Adam(model.parameters(), lr=lr, betas=(ADAM_BETA_1, ADAM_BETA_2), eps=ADAM_EPSILON)
    opt_disc = Adam(model_disc.parameters(), lr=lr, betas=(ADAM_BETA_1, ADAM_BETA_2), eps=ADAM_EPSILON)

    if args.lr is None:
        lr_scheduler = LambdaLR(opt, lr_stepper.step)
        lr_disc_scheduler = LambdaLR(opt_disc, lr_stepper.step)
    else:
        lr_scheduler = None
        lr_disc_scheduler = None

    ##### Tracking best evaluation accuracy #####
    best_eval_acc = 0.0
    best_eval_acc_epoch = -1
    best_eval_loss = float("inf")
    best_eval_loss_epoch = -1

    ##### Results reporting #####
    if not os.path.isfile(results_file):
        with open(results_file, "w", newline="") as o_stream:
            writer = csv.writer(o_stream)
            writer.writerow(CSV_HEADER)

    ##### TRAIN LOOP #####
    for epoch in range(start_epoch, args.epochs):
        # Baseline has no training and acts as a base loss and accuracy (epoch 0 in a sense)
        if epoch > BASELINE_EPOCH:
            print(SEPERATOR)
            print("NEW EPOCH:", epoch + 1)
            print(SEPERATOR)
            print("")

            # Train
            if args.only_Transformer:
                train_with_pure_transformer(epoch + 1, model, train_loader, train_loss_func, opt,
                                            lr_scheduler, args.print_modulus)
            else:
                if args.adv_train:
                    train_with_adv(epoch + 1, model, model_disc, train_loader, train_loss_func, opt, opt_disc,
                                   lr_scheduler, lr_disc_scheduler, args.print_modulus)
                else:
                    train_epoch(epoch + 1, model, train_loader, train_loss_func, opt,
                                lr_scheduler, args.print_modulus)

            print(SEPERATOR)
            print("Evaluating:")
        else:
            print(SEPERATOR)
            print("Baseline model evaluation (Epoch 0):")

        if epoch != -1:
            # Eval
            train_loss, train_acc = eval_model(model, train_loader, train_loss_func)
            eval_loss, eval_acc = eval_model(model, test_loader, eval_loss_func)

            print("Epoch:", epoch + 1)
            if args.metrics:
                TER = get_metrics(model, test_loader)
                print("TER:", TER)

            # Learn rate
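SmoothCrossEntropyLoss used above mixes the one-hot target with a uniform distribution over the vocabulary, q' = (1 - eps) * onehot + eps / V. A self-contained illustration of that idea with plain torch (toy sizes, not the repository's class):

import torch
import torch.nn.functional as F

vocab_size, eps = 10, 0.1
logits = torch.randn(4, vocab_size)                    # fake batch of 4 predictions
target = torch.tensor([1, 3, 5, 7])

onehot = F.one_hot(target, vocab_size).float()
q_prime = (1.0 - eps) * onehot + eps / vocab_size      # smoothed target distribution

smoothed_ce = -(q_prime * F.log_softmax(logits, dim=-1)).sum(dim=-1).mean()
plain_ce = F.cross_entropy(logits, target)
print(float(plain_ce), float(smoothed_ce))             # with eps = 0 the two values coincide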
# from dataset.e_piano import create_epiano_datasets, compute_epiano_accuracy, split_train_test

CSV_HEADER = ["Epoch", "Learn rate", "Avg Train loss", "Train Accuracy", "Avg Eval loss", "Eval accuracy"]

# Baseline is an untrained epoch that we evaluate as a baseline loss and accuracy
BASELINE_EPOCH = -1

# main
def main():
    """
    ----------
    Author: Damon Gwinn
    ----------
    Entry point. Trains a model specified by command line arguments
    ----------
    """
    args = parse_train_args()
    print_train_args(args)

    if args.force_cpu:
        use_cuda(False)
        print("WARNING: Forced CPU usage, expect model to perform slower")
        print("")

    os.makedirs(args.output_dir, exist_ok=True)

    ##### Output prep #####
    params_file = os.path.join(args.output_dir, "model_params.txt")
    write_model_params(args, params_file)

    weights_folder = os.path.join(args.output_dir, "weights")
    os.makedirs(weights_folder, exist_ok=True)

    results_folder = os.path.join(args.output_dir, "results")
    os.makedirs(results_folder, exist_ok=True)

    results_file = os.path.join(results_folder, "results.csv")
    best_loss_file = os.path.join(results_folder, "best_loss_weights.pickle")
    best_acc_file = os.path.join(results_folder, "best_acc_weights.pickle")
    best_text = os.path.join(results_folder, "best_epochs.txt")

    ##### Tensorboard #####
    if args.no_tensorboard:
        tensorboard_summary = None
    else:
        tensorboad_dir = os.path.join(args.output_dir, "tensorboard")
        tensorboard_summary = SummaryWriter(log_dir=tensorboad_dir)

    ##### Datasets #####
    # train_dataset, val_dataset, test_dataset = create_epiano_datasets(args.input_dir, args.max_sequence)
    train_dataset, test_dataset = create_jsf_datasets(args.input_dir, args.max_sequence)

    train_loader = DataLoader(train_dataset, batch_size=args.batch_size * len(args.gpu),
                              num_workers=args.n_workers, shuffle=True)
    # val_loader = DataLoader(val_dataset, batch_size=args.batch_size, num_workers=args.n_workers)
    test_loader = DataLoader(test_dataset, batch_size=args.batch_size, num_workers=args.n_workers)

    ##### read word2event event2word
    f = open(args.word2event, 'rb')
    word2event = pickle.load(f)
    # reverse the vector event2word
    event2word = {}
    for key, val in word2event.items():
        event2word[val] = key

    if args.only_Transformer:
        model = PureTransformer(n_layers=args.n_layers, num_heads=args.num_heads, d_model=args.d_model,
                                dim_feedforward=args.dim_feedforward, dropout=args.dropout,
                                max_sequence=args.max_sequence, rpr=args.rpr,
                                word2event=word2event, event2word=event2word)
    else:
        model = CoCoformer(n_layers=args.n_layers, num_heads=args.num_heads, d_model=args.d_model,
                           dim_feedforward=args.dim_feedforward, dropout=args.dropout,
                           max_sequence=args.max_sequence, rpr=args.rpr,
                           word2event=word2event, event2word=event2word)
    model_disc = Discriminator()
    if args.gpu[0] != -1:
        model = torch.nn.DataParallel(model, device_ids=args.gpu)
        model = model.cuda(device=args.gpu[0])
        model_disc = torch.nn.DataParallel(model_disc, device_ids=args.gpu)
        model_disc = model_disc.cuda(device=args.gpu[0])
    params(train_loader, model, model_disc)

    ##### Continuing from previous training session #####
    start_epoch = BASELINE_EPOCH
    if args.continue_weights is not None:
        if args.continue_epoch is None:
            print("ERROR: Need epoch number to continue from (-continue_epoch) when using continue_weights")
            return
        else:
            model.load_state_dict(torch.load(args.continue_weights))
            start_epoch = args.continue_epoch
    elif args.continue_epoch is not None:
        print("ERROR: Need continue weights (-continue_weights) when using continue_epoch")
        return

    ##### Lr Scheduler vs static lr #####
    if args.lr is None:
        if args.continue_epoch is None:
            init_step = 0
        else:
            init_step = args.continue_epoch * len(train_loader)
        lr = LR_DEFAULT_START * len(args.gpu)
        lr_stepper = LrStepTracker(args.d_model, SCHEDULER_WARMUP_STEPS, init_step)
    else:
        lr = args.lr

    ##### Not smoothing evaluation loss #####
    eval_loss_func = nn.CrossEntropyLoss(ignore_index=TOKEN_PAD)

    ##### SmoothCrossEntropyLoss or CrossEntropyLoss for training #####
    if args.ce_smoothing is None:
        train_loss_func = eval_loss_func
    else:
        train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, VOCAB_SIZE, ignore_index=TOKEN_PAD)

    ##### Optimizer #####
    opt = Adam(model.parameters(), lr=lr, betas=(ADAM_BETA_1, ADAM_BETA_2), eps=ADAM_EPSILON)
    opt_disc = Adam(model_disc.parameters(), lr=lr, betas=(ADAM_BETA_1, ADAM_BETA_2), eps=ADAM_EPSILON)

    if args.lr is None:
        lr_scheduler = LambdaLR(opt, lr_stepper.step)
        lr_disc_scheduler = LambdaLR(opt_disc, lr_stepper.step)
    else:
        lr_scheduler = None
        lr_disc_scheduler = None

    ##### Tracking best evaluation accuracy #####
    best_eval_acc = 0.0
    best_eval_acc_epoch = -1
    best_eval_loss = float("inf")
    best_eval_loss_epoch = -1

    ##### Results reporting #####
    if not os.path.isfile(results_file):
        with open(results_file, "w", newline="") as o_stream:
            writer = csv.writer(o_stream)
            writer.writerow(CSV_HEADER)

    ##### TRAIN LOOP #####
    for epoch in range(start_epoch, args.epochs):
        # Baseline has no training and acts as a base loss and accuracy (epoch 0 in a sense)
        if epoch > BASELINE_EPOCH:
            print(SEPERATOR)
            print("NEW EPOCH:", epoch + 1)
            print(SEPERATOR)
            print("")

            # Train
            if args.only_Transformer:
                train_with_pure_transformer(epoch + 1, model, train_loader, train_loss_func, opt,
                                            lr_scheduler, args.print_modulus)
            else:
                if args.adv_train:
                    train_with_adv(epoch + 1, model, model_disc, train_loader, train_loss_func, opt, opt_disc,
                                   lr_scheduler, lr_disc_scheduler, args.print_modulus)
                else:
                    train_epoch(epoch + 1, model, train_loader, train_loss_func, opt,
                                lr_scheduler, args.print_modulus)

            print(SEPERATOR)
            print("Evaluating:")
        else:
            print(SEPERATOR)
            print("Baseline model evaluation (Epoch 0):")

        if epoch != -1:
            # Eval
            train_loss, train_acc = eval_model(model, train_loader, train_loss_func)
            eval_loss, eval_acc = eval_model(model, test_loader, eval_loss_func)

            print("Epoch:", epoch + 1)
            if args.metrics:
                TER = get_metrics(model, test_loader)
                print("TER:", TER)

            # Learn rate
lr = get_lr(opt)
8
2023-11-01 08:33:08+00:00
16k
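For reference, the adversarial training step above (train_with_adv) sums two cross-entropy terms and a discriminator BCE term with the weights given by the -loss argument (default [0.4, 0.2, 0.8]). A toy sketch of assembling such a weighted objective; every tensor below is a made-up stand-in, not repository data:

import torch
import torch.nn as nn

w = [0.4, 0.2, 0.8]                                    # mirrors the -loss default
ce = nn.CrossEntropyLoss()
bce = nn.BCELoss()

logits_a = torch.randn(8, 100)                         # stand-in generator outputs (variant A)
logits_b = torch.randn(8, 100)                         # stand-in generator outputs (variant B)
target = torch.randint(0, 100, (8,))

d_real = torch.rand(8, 1)                              # stand-in discriminator scores, real data
d_fake = torch.rand(8, 1)                              # stand-in discriminator scores, generated data

loss1 = ce(logits_a, target)
loss2 = ce(logits_b, target)
loss3 = bce(d_fake, torch.zeros_like(d_fake)) + bce(d_real, torch.ones_like(d_real))

total = w[0] * loss1 + w[1] * loss2 + w[2] * loss3     # the combined objective that is backpropagated
print(float(total))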
tiendatnguyen-vision/Orbit-symmetrize
ParticleScatter/exp_lorentz/emlp-pytorch/emlp_pytorch/reps/representation.py
[ { "identifier": "Group", "path": "ParticleScatter/exp_lorentz/emlp-pytorch/emlp_pytorch/groups.py", "snippet": "class Group(nn.Module):\n \"\"\" Abstract Group Object which new groups should inherit from. \"\"\"\n\n def __init__(self):\n super().__init__()\n self.lie_algebra = NotImplemented # The continuous generators\n self.discrete_generators = NotImplemented # The discrete generators\n self.z_scale = None # For scale noise for sampling elements\n self.is_orthogonal = None\n self.is_permutation = None\n self.d = NotImplemented # The dimension of the base representation\n self.device = torch.device('cpu')\n self.args = None\n\n def init(self, *args):\n \"\"\" Initialize the group object. \"\"\"\n # get the dimension of the base group representation\n if self.d is NotImplemented:\n if (self.lie_algebra is not NotImplemented) and \\\n len(self.lie_algebra) > 0:\n self.d = self.lie_algebra[0].size(-1)\n if (self.discrete_generators is not NotImplemented) and \\\n len(self.discrete_generators) > 0:\n self.d = self.discrete_generators[0].size(-1)\n\n self.args = args\n\n if self.lie_algebra is NotImplemented:\n self.lie_algebra = torch.zeros((0, self.d, self.d), device=self.device)\n if self.discrete_generators is NotImplemented:\n self.discrete_generators = torch.zeros((0, self.d, self.d), device=self.device)\n\n self.to(self.device)\n\n # set orthogonal flag automatically if not specified\n if self.is_permutation:\n self.is_orthogonal = True\n if self.is_orthogonal is None:\n self.is_orthogonal = True\n if len(self.lie_algebra) != 0:\n Id = torch.eye(self.d, device=self.device)\n A_dense = torch.stack([[email protected](Ai.dtype) for Ai in self.lie_algebra])\n self.is_orthogonal &= rel_err(-A_dense.transpose(2, 1), A_dense) < 1e-6\n if len(self.discrete_generators) != 0:\n Id = torch.eye(self.d, device=self.device)\n h_dense = torch.stack([[email protected](hi.dtype) for hi in self.discrete_generators])\n self.is_orthogonal &= rel_err(h_dense.transpose(2, 1)@h_dense, Id[None]) < 1e-6\n\n # set regular flag automatically if not specified\n if self.is_orthogonal and (self.is_permutation is None):\n self.is_permutation = True\n # no infinitesmal generators and all rows have one 1\n self.is_permutation &= (len(self.lie_algebra) == 0)\n if len(self.discrete_generators) != 0:\n Id = torch.eye(self.d, device=self.device)\n h_dense = torch.stack([[email protected](hi.dtype) for hi in self.discrete_generators])\n self.is_permutation &= (((h_dense-1).abs()<1e-6).long().sum(-1) == 1).all()\n\n def exp(self, A):\n \"\"\" Matrix exponential \"\"\"\n return torch.linalg.matrix_exp(A)\n\n def num_constraints(self):\n \"\"\" Number of constraints to solve for the group \"\"\"\n return len(self.lie_algebra)+len(self.discrete_generators)\n\n def sample(self):\n \"\"\"Draw a sample from the group (not necessarily Haar measure)\"\"\"\n return self.samples(1)[0]\n\n def samples(self, N):\n \"\"\" Draw N samples from the group (not necessarily Haar measure)\"\"\"\n Id = torch.eye(self.d, device=self.device)\n A_dense = torch.stack([[email protected](Ai.dtype) for Ai in self.lie_algebra]) \\\n if len(self.lie_algebra) \\\n else torch.zeros((0, self.d, self.d), device=self.device)\n h_dense = torch.stack([[email protected](hi.dtype) for hi in self.discrete_generators]) \\\n if len(self.discrete_generators) \\\n else torch.zeros((0, self.d, self.d), device=self.device)\n z = torch.randn(N, A_dense.size(0), device=self.device)\n if self.z_scale is not None:\n z *= self.z_scale\n k = torch.randint(-MAX_POWER, 
MAX_POWER+1, (N, h_dense.size(0), 3), device=self.device)\n return noise2samples(z, k, A_dense, h_dense)\n\n def check_valid_group_elems(self, g):\n \"\"\" Check that the group elements are valid \"\"\"\n return True\n\n def __str__(self):\n return repr(self)\n\n def __repr__(self):\n outstr = f\"{self.__class__}\"\n if self.args:\n outstr += '('+''.join(repr(arg) for arg in self.args)+')'\n return outstr\n\n def __eq__(self, G2): # TODO: more permissive by checking that spans are equal?\n return repr(self) == repr(G2)\n\n def __hash__(self):\n return hash(repr(self))\n\n def __lt__(self, other):\n \"\"\" For sorting purposes only \"\"\"\n return hash(self) < hash(other)\n\n def __mul__(self, other):\n return DirectProduct(self, other)\n\n def forward(self):\n \"\"\" Forward method, unused. \"\"\"\n return None\n\n def to(self, *args, **kwargs):\n \"\"\" Move the group to the specified device \"\"\"\n if isinstance(self.lie_algebra, torch.Tensor):\n self.lie_algebra = self.lie_algebra.to(*args, **kwargs)\n elif isinstance(self.lie_algebra, list):\n self.lie_algebra = [Ai.to(*args, **kwargs) for Ai in self.lie_algebra]\n if isinstance(self.discrete_generators, torch.Tensor):\n self.discrete_generators = self.discrete_generators.to(*args, **kwargs)\n elif isinstance(self.discrete_generators, list):\n self.discrete_generators = [hi.to(*args, **kwargs) for hi in self.discrete_generators]\n if self.z_scale is not None:\n self.z_scale = self.z_scale.to(*args, **kwargs)\n self.device = torch.empty(0).to(*args, **kwargs).device\n return self" }, { "identifier": "LinearOperator", "path": "ParticleScatter/exp_lorentz/emlp-pytorch/emlp_pytorch/reps/linear_operator_base.py", "snippet": "class LinearOperator(nn.Module):\n \"\"\" Common interface for performing matrix vector products\n Many iterative methods (e.g. cg, gmres) do not need to know the\n individual entries of a matrix to solve a linear system A*x=b.\n Such solvers only require the computation of matrix vector\n products, A*v where v is a dense vector. This class serves as\n an abstract interface between iterative solvers and matrix-like\n objects.\n To construct a concrete LinearOperator, either pass appropriate\n callables to the constructor of this class, or subclass it.\n A subclass must implement either one of the methods ``_matvec``\n and ``_matmat``, and the attributes/properties ``shape`` (pair of\n integers) and ``dtype`` (may be None). It may call the ``__init__``\n on this class to have these attributes validated. Implementing\n ``_matvec`` automatically implements ``_matmat`` (using a naive\n algorithm) and vice-versa.\n Optionally, a subclass may implement ``_rmatvec`` or ``_adjoint``\n to implement the Hermitian adjoint (conjugate transpose). As with\n ``_matvec`` and ``_matmat``, implementing either ``_rmatvec`` or\n ``_adjoint`` implements the other automatically. Implementing\n ``_adjoint`` is preferable; ``_rmatvec`` is mostly there for\n backwards compatibility.\n Parameters\n ----------\n shape : tuple\n Matrix dimensions (M, N).\n matvec : callable f(v)\n Returns returns A * v.\n rmatvec : callable f(v)\n Returns A^H * v, where A^H is the conjugate transpose of A.\n matmat : callable f(V)\n Returns A * V, where V is a dense matrix with dimensions (N, K).\n dtype : dtype\n Data type of the matrix.\n rmatmat : callable f(V)\n Returns A^H * V, where V is a dense matrix with dimensions (M, K).\n Attributes\n ----------\n args : tuple\n For linear operators describing products etc. 
of other linear\n operators, the operands of the binary operation.\n ndim : int\n Number of dimensions (this is always 2)\n See Also\n --------\n aslinearoperator : Construct LinearOperators\n Notes\n -----\n The user-defined matvec() function must properly handle the case\n where v has shape (N,) as well as the (N,1) case. The shape of\n the return type is handled internally by LinearOperator.\n LinearOperator instances can also be multiplied, added with each\n other and exponentiated, all lazily: the result of these operations\n is always a new, composite LinearOperator, that defers linear\n operations to the original operators and combines the results.\n More details regarding how to subclass a LinearOperator and several\n examples of concrete LinearOperator instances can be found in the\n external project `PyLops <https://pylops.readthedocs.io>`_.\n Examples\n --------\n >>> def mv(v):\n ... return torch.tensor([2*v[0], 3*v[1]])\n ...\n >>> A = LinearOperator((2,2), matvec=mv)\n >>> A\n <2x2 _CustomLinearOperator with dtype=float64>\n >>> A.matvec(torch.ones(2))\n tensor([ 2., 3.])\n >>> A * torch.ones(2)\n tensor([ 2., 3.])\n \"\"\"\n\n def __new__(cls, *args, **kwargs):\n if cls is LinearOperator:\n # Operate as _CustomLinearOperator factory.\n return super(LinearOperator, cls).__new__(_CustomLinearOperator)\n\n obj = super(LinearOperator, cls).__new__(cls)\n if (type(obj)._matvec == LinearOperator._matvec\n and type(obj)._matmat == LinearOperator._matmat):\n warnings.warn(\"LinearOperator subclass should implement\"\n \" at least one of _matvec and _matmat.\",\n category=RuntimeWarning, stacklevel=2)\n return obj\n\n def __init__(self):\n super().__init__()\n self.ndim = 2\n self.dtype = None\n self.shape = None\n self.device = None\n\n def init(self, dtype, shape, device):\n \"\"\" Initialize this LinearOperator.\n To be called by subclasses. 
``dtype`` may be None; ``shape`` should\n be convertible to a length-2 tuple.\n Called from subclasses at the end of the __init__ routine.\n \"\"\"\n if dtype is None:\n dtype = torch.float # force float 32\n else:\n if not isinstance(dtype, torch.dtype):\n dtype = torch_dtype(dtype)\n\n shape = tuple(shape)\n if not isshape(shape):\n raise ValueError(f\"invalid shape {(shape,)} (must be 2-d)\")\n\n self.dtype = dtype\n self.shape = torch.Size(shape)\n self.device = torch_device(device)\n\n def size(self, dim=None):\n \"\"\" Return the size of this LinearOperator.\n This is a synonym for ``shape``.\n \"\"\"\n return self.shape if dim is None else self.shape[dim]\n\n def _matmat(self, V):\n \"\"\" Default matrix-matrix multiplication handler.\n Falls back on the user-defined _matvec method, so defining that will\n define matrix multiplication (though in a very suboptimal way).\n \"\"\"\n return torch.hstack([self.matvec(col.reshape(-1, 1)) for col in V.T])\n\n def _matvec(self, v):\n \"\"\" Default matrix-vector multiplication handler.\n If self is a linear operator of shape (M, N), then this method will\n be called on a shape (N,) or (N, 1) ndarray, and should return a\n shape (M,) or (M, 1) ndarray.\n This default implementation falls back on _matmat, so defining that\n will define matrix-vector multiplication as well.\n \"\"\"\n return self.matmat(v.reshape(-1, 1))\n\n def matvec(self, v):\n \"\"\" Matrix-vector multiplication.\n Performs the operation y=A*v where A is an MxN linear\n operator and v is a column vector or 1-d array.\n Parameters\n ----------\n v : {matrix, ndarray}\n An array with shape (N,) or (N,1).\n Returns\n -------\n y : {matrix, ndarray}\n A matrix or ndarray with shape (M,) or (M,1) depending\n on the type and shape of the x argument.\n Notes\n -----\n This matvec wraps the user-specified matvec routine or overridden\n _matvec method to ensure that y has the correct shape and type.\n \"\"\"\n M, N = self.shape\n if v.shape != (N,) and v.shape != (N, 1):\n raise ValueError('dimension mismatch')\n\n y = self._matvec(v)\n\n if v.ndim == 1:\n y = y.reshape(M)\n elif v.ndim == 2:\n y = y.reshape(M, 1)\n else:\n raise ValueError('invalid shape returned by user-defined matvec()')\n\n return y\n\n def rmatvec(self, v):\n \"\"\" Adjoint matrix-vector multiplication.\n Performs the operation y = A^H * v where A is an MxN linear\n operator and v is a column vector or 1-d array.\n Parameters\n ----------\n v : {matrix, ndarray}\n An array with shape (M,) or (M,1).\n Returns\n -------\n y : {matrix, ndarray}\n A matrix or ndarray with shape (N,) or (N,1) depending\n on the type and shape of the v argument.\n Notes\n -----\n This rmatvec wraps the user-specified rmatvec routine or overridden\n _rmatvec method to ensure that y has the correct shape and type.\n \"\"\"\n M, N = self.shape\n\n if v.shape != (M,) and v.shape != (M, 1):\n raise ValueError('dimension mismatch')\n\n y = self._rmatvec(v)\n\n if v.ndim == 1:\n y = y.reshape(N)\n elif v.ndim == 2:\n y = y.reshape(N, 1)\n else:\n raise ValueError('invalid shape returned by user-defined rmatvec()')\n\n return y\n\n def _rmatvec(self, v):\n \"\"\" Default implementation of _rmatvec; defers to adjoint. 
\"\"\"\n if type(self)._adjoint == LinearOperator._adjoint:\n # _adjoint not overridden, prevent infinite recursion\n raise NotImplementedError\n return self.H().matvec(v)\n\n def matmat(self, V):\n \"\"\" Matrix-matrix multiplication.\n Performs the operation y=A*V where A is an MxN linear\n operator and V dense N*K matrix or ndarray.\n Parameters\n ----------\n V : {matrix, ndarray}\n An array with shape (N,K).\n Returns\n -------\n Y : {matrix, ndarray}\n A matrix or ndarray with shape (M,K) depending on\n the type of the V argument.\n Notes\n -----\n This matmat wraps any user-specified matmat routine or overridden\n _matmat method to ensure that y has the correct type.\n \"\"\"\n if V.ndim != 2:\n raise ValueError(f'expected 2-d ndarray or matrix, not {V.ndim}-d')\n\n if V.size(0) != self.size(1):\n raise ValueError(f'dimension mismatch: {self.shape}, {V.shape}')\n\n Y = self._matmat(V)\n return Y\n\n def rmatmat(self, V):\n \"\"\" Adjoint matrix-matrix multiplication.\n Performs the operation y = A^H * V where A is an MxN linear\n operator and V is a column vector or 1-d array, or 2-d array.\n The default implementation defers to the adjoint.\n Parameters\n ----------\n V : {matrix, ndarray}\n A matrix or 2D array.\n Returns\n -------\n Y : {matrix, ndarray}\n A matrix or 2D array depending on the type of the input.\n Notes\n -----\n This rmatmat wraps the user-specified rmatmat routine.\n \"\"\"\n if V.ndim != 2:\n raise ValueError(f'expected 2-d matrix, not {V.ndim}-d')\n\n if V.size(0) != self.size(0):\n raise ValueError(f'dimension mismatch: {self.shape}, {V.shape}')\n\n Y = self._rmatmat(V)\n return Y\n\n def _rmatmat(self, V):\n \"\"\" Default implementation of _rmatmat defers to rmatvec or adjoint. \"\"\"\n if type(self)._adjoint == LinearOperator._adjoint:\n return torch.hstack([self.rmatvec(col.reshape(-1, 1)) for col in V.T])\n return self.H().matmat(V)\n\n def forward(self, v):\n \"\"\" Matrix-vector or matrix-matrix multiplication. 
\"\"\"\n return self*v\n\n def __mul__(self, v):\n return self.dot(v)\n\n def dot(self, v):\n \"\"\" Matrix-matrix or matrix-vector multiplication.\n Parameters\n ----------\n v : array_like\n 1-d or 2-d array, representing a vector or matrix.\n Returns\n -------\n Av : array\n 1-d or 2-d array (depending on the shape of x) that represents\n the result of applying this linear operator on x.\n \"\"\"\n if isinstance(v, LinearOperator):\n return _ProductLinearOperator(self, v)\n if torch.is_tensor(v):\n if v.ndim == 0:\n return _ScaledLinearOperator(self, v)\n if v.ndim == 1 or v.ndim == 2 and v.size(1) == 1:\n return self.matvec(v)\n if v.ndim == 2:\n return self.matmat(v)\n raise ValueError(f'expected 1-d or 2-d array or matrix, got {v}')\n\n def __matmul__(self, other):\n if isscalar(other):\n raise ValueError(\"Scalar operands are not allowed, use '*' instead\")\n return self.__mul__(other)\n\n def __rmatmul__(self, other):\n if isscalar(other):\n raise ValueError(\"Scalar operands are not allowed, use '*' instead\")\n return self.__rmul__(other)\n\n def __rmul__(self, x):\n if isscalar(x):\n return _ScaledLinearOperator(self, x)\n return NotImplemented\n\n def __pow__(self, p):\n if isscalar(p):\n return _PowerLinearOperator(self, p)\n return NotImplemented\n\n def __add__(self, x):\n if isinstance(x, LinearOperator):\n return _SumLinearOperator(self, x)\n if torch.is_tensor(x) and x.ndim == 2:\n return _SumLinearOperator(self, Lazy(x))\n return NotImplemented\n\n def __radd__(self, x):\n return self.__add__(x)\n\n def __neg__(self):\n return _ScaledLinearOperator(self, -1)\n\n def __sub__(self, x):\n return self.__add__(-x)\n\n def __repr__(self):\n M, N = self.shape\n if self.dtype is None:\n dtype = 'unspecified dtype'\n else:\n dtype = 'dtype=' + str(self.dtype)\n\n return f'<{M}x{N} {self.__class__.__name__} with {dtype}>'\n\n def adjoint(self):\n \"\"\" Hermitian adjoint.\n Returns the Hermitian adjoint of self, aka the Hermitian\n conjugate or Hermitian transpose. For a complex matrix, the\n Hermitian adjoint is equal to the conjugate transpose.\n Can be abbreviated self.H instead of self.adjoint().\n Returns\n -------\n A_H : LinearOperator\n Hermitian adjoint of self.\n \"\"\"\n return self._adjoint()\n\n def H(self):\n \"\"\" Hermitian adjoint. \"\"\"\n return self.adjoint()\n\n def transpose(self):\n \"\"\" Transpose this linear operator.\n Returns a LinearOperator that represents the transpose of this one.\n Can be abbreviated self.T instead of self.transpose().\n \"\"\"\n return self._transpose()\n\n def t(self):\n \"\"\" Transpose this linear operator. \"\"\"\n return self.transpose()\n\n def _adjoint(self):\n \"\"\" Default implementation of _adjoint; defers to rmatvec. \"\"\"\n return _AdjointLinearOperator(self)\n\n def _transpose(self):\n \"\"\" Default implementation of _transpose; defers to rmatvec + conj\"\"\"\n return _TransposedLinearOperator(self)\n\n def invt(self):\n \"\"\" Default implementation of inverse transpose; defers to inv + T \"\"\"\n return (self ** -1).transpose()\n\n def to_dense(self):\n \"\"\" Default implementation of to_dense which produces the dense\n matrix corresponding to the given lazy matrix. Defaults to\n multiplying by the identity \"\"\"\n return [email protected](self.size(-1), device=self.device)\n\n def to(self, device):\n \"\"\" Move this linear operator to a new device. 
\"\"\"\n self.device = torch.empty(0).to(device).device\n return self" }, { "identifier": "ConcatLazy", "path": "ParticleScatter/exp_lorentz/emlp-pytorch/emlp_pytorch/reps/linear_operators.py", "snippet": "class ConcatLazy(LinearOperator):\n \"\"\" Produces a linear operator equivalent to concatenating\n a collection of matrices Ms along axis=0 \"\"\"\n\n def __init__(self, Ms):\n super().__init__()\n self.Ms = Ms\n assert all(M.size(0) == Ms[0].size(0) for M in Ms),\\\n f\"Trying to concatenate matrices of different sizes {[M.shape for M in Ms]}\"\n shape = (sum(M.size(0) for M in Ms), Ms[0].size(1))\n device = get_device(Ms)\n self.init(None, shape, device)\n self.to(self.device)\n\n def _matmat(self, V):\n return torch.cat([M@V for M in self.Ms])\n\n def _rmatmat(self, V):\n Vs = torch.chunk(V, len(self.Ms))\n return sum(Mi.t()@Vi for Mi, Vi in zip(self.Ms, Vs))\n\n def to_dense(self):\n dense_Ms = [M.to_dense() if isinstance(M, LinearOperator) else M for M in self.Ms]\n return torch.cat(dense_Ms)\n\n def to(self, device):\n self.Ms = [M.to(device) for M in self.Ms]\n self.device = torch.empty(0).to(device).device\n return self" }, { "identifier": "I", "path": "ParticleScatter/exp_lorentz/emlp-pytorch/emlp_pytorch/reps/linear_operators.py", "snippet": "class I(LinearOperator):\n \"\"\" Identity operator. \"\"\"\n\n def __init__(self, d, device=None):\n super().__init__()\n shape = (d, d)\n self.init(None, shape, device)\n\n def _matmat(self, V): # (c,k)\n return V\n\n def _matvec(self, v):\n return v\n\n def _adjoint(self):\n return self\n\n def invt(self):\n return self" }, { "identifier": "lazify", "path": "ParticleScatter/exp_lorentz/emlp-pytorch/emlp_pytorch/reps/linear_operators.py", "snippet": "def lazify(x):\n \"\"\" Convert a tensor LinearOperator. \"\"\"\n if isinstance(x, LinearOperator):\n return x\n if torch.is_tensor(x):\n return Lazy(x)\n raise NotImplementedError" }, { "identifier": "densify", "path": "ParticleScatter/exp_lorentz/emlp-pytorch/emlp_pytorch/reps/linear_operators.py", "snippet": "def densify(x):\n \"\"\" Convert a LinearOperator to a dense tensor. \"\"\"\n if isinstance(x, LinearOperator):\n return x.to_dense()\n if torch.is_tensor(x):\n return x\n raise NotImplementedError" }, { "identifier": "LazyJVP", "path": "ParticleScatter/exp_lorentz/emlp-pytorch/emlp_pytorch/reps/linear_operators.py", "snippet": "class LazyJVP(LinearOperator):\n \"\"\" Lazy Jacobian-vector product. \"\"\"\n\n def __init__(self, operator_fn, X, TX):\n super().__init__()\n self.operator_fn = operator_fn\n self.X = X\n self.TX = TX\n self.init(torch.float, operator_fn(X).shape, X.device)\n self.to(self.device)\n\n def vjp(self, v):\n \"\"\" Computes the vector-Jacobian product \"\"\"\n return torch.autograd.functional.jvp(\n lambda x: self.operator_fn(x)@v, [self.X], [self.TX])[1]\n\n def vjp_T(self, v):\n \"\"\" Computes the vector-Jacobian product \"\"\"\n return torch.autograd.functional.jvp(\n lambda x: self.operator_fn(x).t()@v, [self.X], [self.TX])[1]\n\n def _matmat(self, V):\n return self.vjp(V)\n\n def _matvec(self, v):\n return self.vjp(v)\n\n def _rmatmat(self, V):\n return self.vjp_T(V)\n\n def to(self, device):\n self.X = self.X.to(device)\n self.TX = self.TX.to(device)\n self.device = self.X.device\n return self" }, { "identifier": "LazyPerm", "path": "ParticleScatter/exp_lorentz/emlp-pytorch/emlp_pytorch/reps/linear_operators.py", "snippet": "class LazyPerm(LinearOperator):\n \"\"\" Lazy permutation. 
\"\"\"\n\n def __init__(self, perm):\n super().__init__()\n self.perm = perm\n shape = (len(perm), len(perm))\n self.init(None, shape, perm.device)\n\n def _matmat(self, V):\n return V[self.perm]\n\n def _matvec(self, v):\n return v[self.perm]\n\n def _adjoint(self):\n return LazyPerm(torch.argsort(self.perm))\n\n def invt(self):\n return self\n\n def to(self, device):\n self.perm = self.perm.to(device)\n self.device = self.perm.device\n return self" }, { "identifier": "LazyDirectSum", "path": "ParticleScatter/exp_lorentz/emlp-pytorch/emlp_pytorch/reps/linear_operators.py", "snippet": "class LazyDirectSum(LinearOperator):\n \"\"\" Lazy direct sum. \"\"\"\n\n def __init__(self, Ms, multiplicities=None):\n super().__init__()\n self.Ms = Ms\n self.multiplicities = [1 for _ in Ms] if multiplicities is None else multiplicities\n shape = (sum(Mi.size(0)*c for Mi, c in zip(Ms, multiplicities)),\n sum(Mi.size(0)*c for Mi, c in zip(Ms, multiplicities)))\n device = get_device(Ms)\n self.init(None, shape, device)\n self.to(self.device)\n\n def _matvec(self, v):\n return lazy_direct_matmat(v, self.Ms, self.multiplicities)\n\n def _matmat(self, V): # (n,k)\n return lazy_direct_matmat(V, self.Ms, self.multiplicities)\n\n def _adjoint(self):\n return LazyDirectSum([Mi.t() for Mi in self.Ms])\n\n def invt(self):\n return LazyDirectSum([M.invt() for M in self.Ms])\n\n def to_dense(self):\n Ms_all = [M for M, c in zip(self.Ms, self.multiplicities)\n for _ in range(c)]\n Ms_all = [Mi.to_dense() if isinstance(Mi, LinearOperator)\n else Mi for Mi in Ms_all]\n return torch.block_diag(*Ms_all)\n\n def to(self, device):\n self.Ms = [M.to(device) for M in self.Ms]\n self.device = torch.empty(0).to(device).device\n return self" }, { "identifier": "LazyKron", "path": "ParticleScatter/exp_lorentz/emlp-pytorch/emlp_pytorch/reps/linear_operators.py", "snippet": "class LazyKron(LinearOperator):\n \"\"\" Lazy tensor product. \"\"\"\n\n def __init__(self, Ms):\n super().__init__()\n self.Ms = Ms\n shape = product([Mi.size(0) for Mi in Ms]), product([Mi.size(1) for Mi in Ms])\n device = get_device(Ms)\n self.init(None, shape, device)\n self.to(self.device)\n\n def _matvec(self, v):\n return self._matmat(v).reshape(-1)\n\n def _matmat(self, V):\n eV = V.reshape(*[Mi.size(-1) for Mi in self.Ms], -1)\n for i, M in enumerate(self.Ms):\n eV_front = torch.movedim(eV, i, 0)\n MeV_front = (M@eV_front.reshape(M.size(-1), -1)).reshape(M.size(0), *eV_front.shape[1:])\n eV = torch.movedim(MeV_front, 0, i)\n return eV.reshape(self.size(0), eV.size(-1))\n\n def _adjoint(self):\n return LazyKron([Mi.t() for Mi in self.Ms])\n\n def invt(self):\n return LazyKron([M.invt() for M in self.Ms])\n\n def to_dense(self):\n self.to(self.device)\n Ms = [M.to_dense() if isinstance(M, LinearOperator) else M for M in self.Ms]\n return reduce(torch.kron, Ms)\n\n def __new__(cls, Ms):\n if len(Ms) == 1:\n return Ms[0]\n return super().__new__(cls)\n\n def to(self, device):\n self.Ms = [M.to(device) for M in self.Ms]\n self.device = torch.empty(0).to(device).device\n return self" }, { "identifier": "LazyKronsum", "path": "ParticleScatter/exp_lorentz/emlp-pytorch/emlp_pytorch/reps/linear_operators.py", "snippet": "class LazyKronsum(LinearOperator):\n \"\"\" Lazy tensor sum. 
\"\"\"\n\n def __init__(self, Ms):\n super().__init__()\n self.Ms = Ms\n shape = product([Mi.size(0) for Mi in Ms]), product([Mi.size(1) for Mi in Ms])\n dtype = torch.float\n device = get_device(Ms)\n self.init(dtype, shape, device)\n self.to(self.device)\n\n def _matvec(self, v):\n return self._matmat(v).reshape(-1)\n\n def _matmat(self, V):\n eV = V.reshape(*[Mi.size(-1) for Mi in self.Ms], -1)\n out = 0*eV\n for i, M in enumerate(self.Ms):\n eV_front = torch.movedim(eV, i, 0)\n M, eV_front = dtype_cast(M, eV_front)\n MeV_front = (M@eV_front.reshape(M.size(-1), -1)).reshape(M.size(0), *eV_front.shape[1:])\n out, MeV_front = dtype_cast(out, MeV_front)\n out += torch.movedim(MeV_front, 0, i)\n return out.reshape(self.size(0), eV.size(-1))\n\n def _adjoint(self):\n return LazyKronsum([Mi.t() for Mi in self.Ms])\n\n def to_dense(self):\n Ms = [M.to_dense() if isinstance(M, LinearOperator) else M for M in self.Ms]\n return reduce(kronsum, Ms)\n\n def __new__(cls, Ms):\n if len(Ms) == 1:\n return Ms[0]\n return super().__new__(cls)\n\n # could also be implemented as follows,\n # but fusing the sum into a single linearOperator is faster\n # def lazy_kronsum(Ms):\n # n = len(Ms)\n # lprod = np.cumprod([1]+[mi.size(-1) for mi in Ms])\n # rprod = np.cumprod([1]+[mi.size(-1) for mi in reversed(Ms)])[::-1]\n # return reduce(lambda a,b: a+b,[lazy_kron([I(lprod[i]),Mi,I(rprod[i+1])])\n # for i,Mi in enumerate(Ms)])\n\n def to(self, device):\n self.Ms = [M.to(device) for M in self.Ms]\n self.device = torch.empty(0).to(device).device\n return self" }, { "identifier": "lazy_direct_matmat", "path": "ParticleScatter/exp_lorentz/emlp-pytorch/emlp_pytorch/reps/linear_operators.py", "snippet": "def lazy_direct_matmat(v, Ms, mults):\n \"\"\" Computes the matrix-vector product of a direct sum of matrices\n with a vector. \"\"\"\n k = v.size(1) if len(v.shape) > 1 else 1\n i = 0\n y = []\n for M, multiplicity in zip(Ms, mults):\n i_end = i+multiplicity*M.size(-1)\n elems = M@v[i:i_end][None].reshape(k*multiplicity, M.size(-1)).t()\n y.append(elems.t().reshape(k, multiplicity*M.size(0)).t())\n i = i_end\n y = torch.cat(y) # concatenate over rep axis\n return y" }, { "identifier": "product", "path": "ParticleScatter/exp_lorentz/emlp-pytorch/emlp_pytorch/reps/linear_operators.py", "snippet": "def product(c):\n \"\"\" Product of a list of numbers. \"\"\"\n return reduce(lambda a, b: a*b, c)" }, { "identifier": "orthogonal_complement", "path": "ParticleScatter/exp_lorentz/emlp-pytorch/emlp_pytorch/reps/utils.py", "snippet": "def orthogonal_complement(proj):\n \"\"\" Computes the orthogonal complement to a given matrix proj\"\"\"\n _, S, Vh = torch.linalg.svd(proj, full_matrices=True)\n rank = (S > 1e-5).sum()\n return Vh[rank:].conj().t()" }, { "identifier": "krylov_constraint_solve", "path": "ParticleScatter/exp_lorentz/emlp-pytorch/emlp_pytorch/reps/utils.py", "snippet": "def krylov_constraint_solve(C, tol=1e-5):\n \"\"\" Computes the solution basis Q for the linear constraint CQ=0 and QᵀQ=I\n up to specified tolerance with C expressed as a LinearOperator. 
\"\"\"\n r = 5\n if C.size(0)*r*2 > 2e9:\n raise RuntimeError(f\"Solns for contraints {C.shape} too large to fit in memory\")\n found_rank = 5\n while found_rank == r:\n r *= 2 # Iterative doubling of rank until large enough to include the full solution space\n if C.size(0)*r > 2e9:\n logging.error(\"Hit memory limits, switching to \"\n \"sample equivariant subspace of size %r\", found_rank)\n break\n Q = krylov_constraint_solve_upto_r(C, r, tol)\n found_rank = Q.size(-1)\n return Q" }, { "identifier": "get_device", "path": "ParticleScatter/exp_lorentz/emlp-pytorch/emlp_pytorch/reps/utils.py", "snippet": "def get_device(operators, devices=None):\n \"\"\" Returns the device of the first operator that has a device attribute. \"\"\"\n if devices is None:\n devices = []\n for obj in operators:\n if obj is not None and hasattr(obj, 'device') and obj.device.type != 'cpu':\n return obj.device\n return torch.device('cpu')" } ]
import math
import logging
import itertools
import torch
from functools import lru_cache as cache, reduce
from collections import defaultdict
from plum import dispatch
from torch import nn
from ..groups import Group
from .linear_operator_base import LinearOperator
from .linear_operators import ConcatLazy, I, lazify, densify, LazyJVP, LazyPerm, \
    LazyDirectSum, LazyKron, LazyKronsum, lazy_direct_matmat, product
from .utils import orthogonal_complement, krylov_constraint_solve, get_device
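Among these imports, LazyKron is the lazy Kronecker product used to build tensor-product representations. A minimal sketch of its behaviour, assuming the package is importable as emlp_pytorch per the paths above; the matrices are random placeholders:

import torch
from emlp_pytorch.reps.linear_operators import LazyKron, lazify

A, B = torch.randn(2, 2), torch.randn(3, 3)
K = LazyKron([lazify(A), lazify(B)])  # acts like torch.kron(A, B) without densifying it
X = torch.randn(6, 4)
assert torch.allclose(K @ X, torch.kron(A, B) @ X, atol=1e-4)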
11,304
def bilinear_weights(out_rep, in_rep): """ Bilinear weights for a linear operator from in_rep to out_rep. """ # TODO: replace lazy_projection function with LazyDirectSum LinearOperator W_rep, W_perm = (in_rep >> out_rep).canonicalize() # TODO: possible bug when in_rep and out_rep are both non sumreps? investigate inv_perm = torch.argsort(W_perm) mat_shape = out_rep.size(), in_rep.size() x_rep = in_rep W_multiplicities = W_rep.reps x_multiplicities = x_rep.reps x_multiplicities = {rep: n for rep, n in x_multiplicities.items() if rep != Scalar} def nelems(nx, rep): return min(nx, rep.size()) active_dims = sum(W_multiplicities.get(rep, 0)*nelems(n, rep) for rep, n in x_multiplicities.items()) reduced_indices_dict = {rep: ids[torch.randint( len(ids), size=(nelems(len(ids), rep),))].reshape(-1) for rep, ids in x_rep.as_dict(torch.arange(x_rep.size())).items()} # Apply the projections for each rank, concatenate, and permute back to orig rank order # (r,), (*c) # TODO: find out why backwards of this function is so slow def lazy_projection(params, x): bshape = x.shape[:-1] x = x.reshape(-1, x.size(-1)) bs = x.size(0) i = 0 Ws = [] for rep, W_mult in W_multiplicities.items(): if rep not in x_multiplicities: Ws.append(torch.zeros((bs, W_mult*rep.size()), device=x.device)) continue x_mult = x_multiplicities[rep] n = nelems(x_mult, rep) i_end = i+W_mult*n bids = reduced_indices_dict[rep] bilinear_params = params[i:i_end].reshape(W_mult, n) # bs,nK-> (nK,bs) i = i_end # (bs,W_mult,d^r) = (W_mult,n)@(n,d^r,bs) bilinear_elems = bilinear_params@x[..., bids].t().reshape(n, rep.size()*bs) bilinear_elems = bilinear_elems.reshape(W_mult*rep.size(), bs).t() Ws.append(bilinear_elems) Ws = torch.cat(Ws, axis=-1) # concatenate over rep axis # reorder to original rank ordering return Ws[..., inv_perm].reshape(*bshape, *mat_shape) return active_dims, lazy_projection class SumRep(Rep): """ A sum of representations, e.g. V+V.T. """ def __init__(self, *reps, extra_perm=None, skip_init=False): """ Constructs a tensor type based on a list of tensor ranks and possibly the symmetry generators gen.""" super().__init__() if skip_init: return # Integers can be used as shorthand for scalars. 
reps = [SumRepFromCollection({Scalar: rep}) if isinstance(rep, int) else \ rep for rep in reps] # Get reps and permutations reps, perms = zip(*[rep.canonicalize() for rep in reps]) rep_counters = [rep.reps if isinstance(rep, SumRep) else {rep: 1} for rep in reps] # Combine reps and permutations: ∑_a + ∑_b = ∑_{a∪b} self.reps, perm = self.compute_canonical(rep_counters, perms) self.perm = extra_perm[perm] if extra_perm is not None else perm self.invperm = torch.argsort(self.perm) self.canonical = (self.perm == torch.arange(len(self.perm))).all() self.is_permutation = all(rep.is_permutation for rep in self.reps.keys()) def size(self): return sum(rep.size()*count for rep, count in self.reps.items()) def rho(self, M): rhos = [rep.rho(M) for rep in self.reps] multiplicities = self.reps.values() return LazyPerm(self.invperm)@LazyDirectSum(rhos, multiplicities)@LazyPerm(self.perm) def drho(self, A): drhos = [rep.drho(A) for rep in self.reps] multiplicities = self.reps.values() return LazyPerm(self.invperm)@LazyDirectSum(drhos, multiplicities)@LazyPerm(self.perm) def __eq__(self, other): return self.reps == other.reps and (self.perm == other.perm).all() def __hash__(self): assert self.canonical return hash(tuple(self.reps.items())) def t(self): """ only swaps to adjoint representation, does not reorder elems""" return SumRep(*[rep.t() for rep, c in self.reps.items() for _ in range(c)], extra_perm=self.perm) def __repr__(self): return "+".join(f"{count if count > 1 else ''}{repr(rep)}" for rep, count in self.reps.items()) def canonicalize(self): """Returns a canonically ordered rep with order np.arange(self.size()) and the permutation which achieves that ordering""" return SumRepFromCollection(self.reps), self.perm def forward(self, G): return SumRepFromCollection({rep(G): c for rep, c in self.reps.items()}, perm=self.perm) def concrete(self): return True def equivariant_basis(self): """ Overrides default implementation with a more efficient version which decomposes the constraints across the sum.""" Qs = {rep: rep.equivariant_basis() for rep in self.reps} device = self.G.device if self.G is not None else get_device(list(Qs.values())) Qs = {rep: (Q.to(device).to(torch.float) if torch.is_tensor(Q) else Q) \ for rep, Q in Qs.items()} active_dims = sum(self.reps[rep]*Qs[rep].size(-1) for rep in Qs.keys()) multiplicities = self.reps.values() def lazy_Q(array):
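cropped_code stops inside SumRep.equivariant_basis, right before the closure that stitches the per-rep bases back together. That closure leans on lazy_direct_matmat, which applies a block-diagonal operator block by block; a small self-contained sketch with made-up block sizes:

import torch
from emlp_pytorch.reps.linear_operators import lazy_direct_matmat

# Blocks: a 2x2 identity with multiplicity 1 and a 1x1 scaling with multiplicity 2,
# i.e. the block-diagonal matrix diag(I_2, 3, 3) applied to a length-4 column vector.
Ms = [torch.eye(2), 3 * torch.ones(1, 1)]
v = torch.arange(4.0).reshape(4, 1)
out = lazy_direct_matmat(v, Ms, [1, 2])
assert torch.allclose(out.flatten(), torch.tensor([0.0, 1.0, 6.0, 9.0]))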
""" The base Representation class. """ class Rep(nn.Module): """ The base Representation class. Representation objects formalize the vector space V on which the group acts, the group representation matrix ρ(g), and the Lie Algebra representation dρ(A) in a single object. Representations act as types for vectors coming from V. These types can be manipulated and transformed with the built in operators ⊕,⊗,dual, as well as incorporating custom representations. Representation objects should be immutable. At minimum, new representations need to implement ``rho``, ``__str__``.""" def __init__(self): super().__init__() self.is_permutation = False self._size = None self.G = None def rho(self, M): """ Group representation of the matrix M of shape (d,d)""" raise NotImplementedError def drho(self, A): """ Lie Algebra representation of the matrix A of shape (d,d)""" In = torch.eye(A.size(0), dtype=A.dtype, device=A.device) return LazyJVP(self.rho, In, A) def forward(self, G): """ Instantiate (nonconcrete) representation with a symmetry group (forward) """ raise NotImplementedError def __str__(self): return repr(self) def __repr__(self): raise NotImplementedError def __eq__(self, other): if type(self) is not type(other): # pylint: disable=unidiomatic-typecheck return False return self.__hash__() == other.__hash__() def __hash__(self): raise NotImplementedError def size(self): """ Dimension dim(V) of the representation """ if self._size is not None: return self._size if self.concrete() and isinstance(self.G, Group): self._size = self.rho(self.G.sample()).size(-1) return self._size raise NotImplementedError def canonicalize(self): """ An optional method to convert the representation into a canonical form in order to reuse equivalent solutions in the solver. Should return both the canonically ordered representation, along with a permutation which can be applied to vectors of the current representation to achieve that ordering. """ # return canonicalized rep return self, torch.arange(self.size()) def rho_dense(self, M): """ A convenience function which returns rho(M) as a dense matrix.""" return densify(self.rho(M)) def drho_dense(self, A): """ A convenience function which returns drho(A) as a dense matrix.""" return densify(self.drho(A)) def constraint_matrix(self): """ Constructs the equivariance constrant matrix (lazily) by concatenating the constraints (ρ(hᵢ)-I) for i=1,...M and dρ(Aₖ) for k=1,..,D from the generators of the symmetry group. """ n = self.size() constraints = [] constraints.extend([lazify(self.rho(h)).to(self.G.device)-I(n, device=self.G.device) \ for h in self.G.discrete_generators]) constraints.extend([lazify(self.drho(A)).to(self.G.device) for A in self.G.lie_algebra]) return ConcatLazy(constraints) if constraints else lazify( torch.zeros((1, n), device=self.G.device)) solcache = {} def equivariant_basis(self): """ Computes the equivariant solution basis for the given representation of size N. Canonicalizes problems and caches solutions for reuse. 
Output [Q (N,r)] """ if self == Scalar: return torch.ones((1, 1), device=self.G.device) canon_rep, perm = self.canonicalize() invperm = torch.argsort(perm) if canon_rep not in self.solcache: logging.info("%r cache miss", canon_rep) logging.info("Solving basis for %r%s", self, f", for G={self.G}" if self.G is not None else "") C_lazy = canon_rep.constraint_matrix() if C_lazy.size(0)*C_lazy.size(1) > 3e7: # Too large to use SVD result = krylov_constraint_solve(C_lazy) else: C_dense = C_lazy.to_dense() result = orthogonal_complement(C_dense) self.solcache[canon_rep] = result return self.solcache[canon_rep][invperm] def equivariant_projector(self): """ Computes the (lazy) projection matrix P=QQᵀ that projects to the equivariant basis.""" Q = self.equivariant_basis() Q_lazy = lazify(Q) P = Q_lazy@Q_lazy.H() return P def concrete(self): """ Concreteness """ return isinstance(self.G, Group) def __add__(self, other): """ Direct sum (⊕) of representations. """ if isinstance(other, int): if other == 0: return self return self+other*Scalar if both_concrete(self, other): return SumRep(self, other) return DeferredSumRep(self, other) def __radd__(self, other): if isinstance(other, int): if other == 0: return self return other*Scalar+self return NotImplemented def __mul__(self, other): """ Tensor sum (⊗) of representations. """ return mul_reps(self, other) def __rmul__(self, other): return mul_reps(other, self) def __pow__(self, other): """ Iterated tensor product. """ assert isinstance(other, int), \ f"Power only supported for integers, not {type(other)}" assert other >= 0, f"Negative powers {other} not supported" return reduce(lambda a, b: a*b, other*[self], Scalar) def __rshift__(self, other): """ Linear maps from self -> other """ return other*self.t() def __lshift__(self, other): """ Linear maps from other -> self """ return self*other.t() def __lt__(self, other): """ less than defined to disambiguate ordering multiple different representations. Canonical ordering is determined first by Group, then by size, then by hash""" if other == Scalar: return False try: if self.G < other.G: return True if self.G > other.G: return False except (AttributeError, TypeError): pass if self.size() < other.size(): return True if self.size() > other.size(): return False return hash(self) < hash(other) # For sorting purposes only def t(self): """ Dual representation V*, rho*, drho*.""" if isinstance(self.G, Group) and self.G.is_orthogonal: return self return Dual(self) @dispatch def mul_reps(ra, rb: int): """ Product of a scalar and a representation. """ if rb == 1: return ra if rb == 0: return 0 if ra.concrete(): return SumRep(*(rb*[ra])) return DeferredSumRep(*(rb*[ra])) @dispatch def mul_reps(ra: int, rb): # pylint: disable=function-redefined """ Product of a scalar and a representation. """ return mul_reps(rb, ra) # pylint: disable=W1114:arguments-out-of-order class ScalarRep(Rep): """ The trivial representation of the group G. 
""" def __init__(self, G=None): super().__init__() self.G = G self.is_permutation = True def forward(self, G): self.G = G return self def size(self): return 1 def canonicalize(self): return self, torch.zeros(1, dtype=torch.long) def __repr__(self): return "V⁰" def t(self): return self def rho(self, M): return torch.eye(1, device=self.G.device) def drho(self, A): return 0*torch.eye(1, device=self.G.device) def __hash__(self): return 0 def __eq__(self, other): return isinstance(other, ScalarRep) def __mul__(self, other): if isinstance(other, int): return super().__mul__(other) return other def __rmul__(self, other): if isinstance(other, int): return super().__rmul__(other) return other def concrete(self): return True class Base(Rep): """ Base representation V of a group.""" def __init__(self, G=None): super().__init__() self.G = G if G is not None: self.is_permutation = G.is_permutation def forward(self, G): return self.__class__(G) def rho(self, M): if isinstance(self.G, Group) and isinstance(M, dict): M = M[self.G] return M def drho(self, A): if isinstance(self.G, Group) and isinstance(A, dict): A = A[self.G] return A def size(self): assert self.G is not None, f"must know G to find size for rep={self}" return self.G.d def __repr__(self): return "V" def __hash__(self): return hash((type(self), self.G)) def __eq__(self, other): return type(other) is type(self) and self.G == other.G def __lt__(self, other): if isinstance(other, Dual): return True return super().__lt__(other) class Dual(Rep): """ Dual representation V*, rho*, drho*.""" def __init__(self, rep): super().__init__() self.rep = rep self.G = rep.G if hasattr(rep, "is_permutation"): self.is_permutation = rep.is_permutation def forward(self, G): return self.rep(G).t() def rho(self, M): rho = self.rep.rho(M) rhoinvt = rho.invt() if isinstance(rho, LinearOperator) else torch.linalg.inv(rho).t() return rhoinvt def drho(self, A): return -self.rep.drho(A).t() def __repr__(self): return repr(self.rep)+"*" def t(self): return self.rep def __eq__(self, other): return type(other) is type(self) and self.rep == other.rep def __hash__(self): return hash((type(self), self.rep)) def __lt__(self, other): if other == self.rep: return False return super().__lt__(other) def size(self): return self.rep.size() # Alias V or Vector for an instance of the Base representation of a group V = Vector = Base() # An instance of the Scalar representation, equivalent to V**0 Scalar = ScalarRep() def T(p, q=0, G=None): """ A convenience function for creating rank (p,q) tensors.""" return (V**p*V.t()**q)(G) def bilinear_weights(out_rep, in_rep): """ Bilinear weights for a linear operator from in_rep to out_rep. """ # TODO: replace lazy_projection function with LazyDirectSum LinearOperator W_rep, W_perm = (in_rep >> out_rep).canonicalize() # TODO: possible bug when in_rep and out_rep are both non sumreps? 
investigate inv_perm = torch.argsort(W_perm) mat_shape = out_rep.size(), in_rep.size() x_rep = in_rep W_multiplicities = W_rep.reps x_multiplicities = x_rep.reps x_multiplicities = {rep: n for rep, n in x_multiplicities.items() if rep != Scalar} def nelems(nx, rep): return min(nx, rep.size()) active_dims = sum(W_multiplicities.get(rep, 0)*nelems(n, rep) for rep, n in x_multiplicities.items()) reduced_indices_dict = {rep: ids[torch.randint( len(ids), size=(nelems(len(ids), rep),))].reshape(-1) for rep, ids in x_rep.as_dict(torch.arange(x_rep.size())).items()} # Apply the projections for each rank, concatenate, and permute back to orig rank order # (r,), (*c) # TODO: find out why backwards of this function is so slow def lazy_projection(params, x): bshape = x.shape[:-1] x = x.reshape(-1, x.size(-1)) bs = x.size(0) i = 0 Ws = [] for rep, W_mult in W_multiplicities.items(): if rep not in x_multiplicities: Ws.append(torch.zeros((bs, W_mult*rep.size()), device=x.device)) continue x_mult = x_multiplicities[rep] n = nelems(x_mult, rep) i_end = i+W_mult*n bids = reduced_indices_dict[rep] bilinear_params = params[i:i_end].reshape(W_mult, n) # bs,nK-> (nK,bs) i = i_end # (bs,W_mult,d^r) = (W_mult,n)@(n,d^r,bs) bilinear_elems = bilinear_params@x[..., bids].t().reshape(n, rep.size()*bs) bilinear_elems = bilinear_elems.reshape(W_mult*rep.size(), bs).t() Ws.append(bilinear_elems) Ws = torch.cat(Ws, axis=-1) # concatenate over rep axis # reorder to original rank ordering return Ws[..., inv_perm].reshape(*bshape, *mat_shape) return active_dims, lazy_projection class SumRep(Rep): """ A sum of representations, e.g. V+V.T. """ def __init__(self, *reps, extra_perm=None, skip_init=False): """ Constructs a tensor type based on a list of tensor ranks and possibly the symmetry generators gen.""" super().__init__() if skip_init: return # Integers can be used as shorthand for scalars. 
reps = [SumRepFromCollection({Scalar: rep}) if isinstance(rep, int) else \ rep for rep in reps] # Get reps and permutations reps, perms = zip(*[rep.canonicalize() for rep in reps]) rep_counters = [rep.reps if isinstance(rep, SumRep) else {rep: 1} for rep in reps] # Combine reps and permutations: ∑_a + ∑_b = ∑_{a∪b} self.reps, perm = self.compute_canonical(rep_counters, perms) self.perm = extra_perm[perm] if extra_perm is not None else perm self.invperm = torch.argsort(self.perm) self.canonical = (self.perm == torch.arange(len(self.perm))).all() self.is_permutation = all(rep.is_permutation for rep in self.reps.keys()) def size(self): return sum(rep.size()*count for rep, count in self.reps.items()) def rho(self, M): rhos = [rep.rho(M) for rep in self.reps] multiplicities = self.reps.values() return LazyPerm(self.invperm)@LazyDirectSum(rhos, multiplicities)@LazyPerm(self.perm) def drho(self, A): drhos = [rep.drho(A) for rep in self.reps] multiplicities = self.reps.values() return LazyPerm(self.invperm)@LazyDirectSum(drhos, multiplicities)@LazyPerm(self.perm) def __eq__(self, other): return self.reps == other.reps and (self.perm == other.perm).all() def __hash__(self): assert self.canonical return hash(tuple(self.reps.items())) def t(self): """ only swaps to adjoint representation, does not reorder elems""" return SumRep(*[rep.t() for rep, c in self.reps.items() for _ in range(c)], extra_perm=self.perm) def __repr__(self): return "+".join(f"{count if count > 1 else ''}{repr(rep)}" for rep, count in self.reps.items()) def canonicalize(self): """Returns a canonically ordered rep with order np.arange(self.size()) and the permutation which achieves that ordering""" return SumRepFromCollection(self.reps), self.perm def forward(self, G): return SumRepFromCollection({rep(G): c for rep, c in self.reps.items()}, perm=self.perm) def concrete(self): return True def equivariant_basis(self): """ Overrides default implementation with a more efficient version which decomposes the constraints across the sum.""" Qs = {rep: rep.equivariant_basis() for rep in self.reps} device = self.G.device if self.G is not None else get_device(list(Qs.values())) Qs = {rep: (Q.to(device).to(torch.float) if torch.is_tensor(Q) else Q) \ for rep, Q in Qs.items()} active_dims = sum(self.reps[rep]*Qs[rep].size(-1) for rep in Qs.keys()) multiplicities = self.reps.values() def lazy_Q(array):
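all_code ends at the same cut point as cropped_code. For orientation, typical downstream usage of the Rep algebra defined in this file looks like the sketch below; the SO(3) group constructor and the top-level re-exports are assumed to follow the upstream emlp API and are not part of this record:

import torch
from emlp_pytorch.groups import SO        # assumed group constructor
from emlp_pytorch.reps import V           # assumed re-export of the Base instance

G = SO(3)
rep_in, rep_out = V(G), V(G) ** 2
W_rep = rep_in >> rep_out                 # representation of linear maps V -> V⊗V
Q = W_rep.equivariant_basis()             # (27, r) basis of G-equivariant maps
P = W_rep.equivariant_projector()         # lazy projector P = Q Qᵀ
w = torch.randn(W_rep.size(), 1)
w_equiv = P @ w                           # project arbitrary weights onto the equivariant subspace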
return lazy_direct_matmat(array, Qs.values(), multiplicities)[self.invperm]
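This next_line is the ground-truth completion of the lazy_Q closure that both code fields stop at. Assembled purely from the fields above, the finished helper inside SumRep.equivariant_basis reads:

        def lazy_Q(array):
            # Apply each per-rep basis block-diagonally, then undo the canonical permutation.
            return lazy_direct_matmat(array, Qs.values(), multiplicities)[self.invperm]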
11
2023-11-01 07:19:02+00:00
16k